From 0893ca1584a41ea632214dbcaf8ae134b990f9e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 26 Feb 2024 11:32:35 +0100 Subject: [PATCH 01/86] frame-support: Improve error reporting when having too many pallets (#3478) Instead of only generating the error, we now generate the actual code and the error. This generates in total less errors and helps the user to identify the actual problem and not being confronted with tons of errors. --- .../procedural/src/construct_runtime/mod.rs | 43 ++++--- ...umber_of_pallets_exceeds_tuple_size.stderr | 106 ++++-------------- 2 files changed, 49 insertions(+), 100 deletions(-) diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 54ed15f7b1d..6e95fdf116a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -233,25 +233,38 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let input_copy = input.clone(); let definition = syn::parse_macro_input!(input as RuntimeDeclaration); - let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => - check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( - |_| construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), - ), - RuntimeDeclaration::Explicit(explicit_decl) => check_pallet_number( - input_copy.clone().into(), - explicit_decl.pallets.len(), - ) - .and_then(|_| { - construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl) - }), - RuntimeDeclaration::ExplicitExpanded(explicit_decl) => - check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) - .and_then(|_| construct_runtime_final_expansion(explicit_decl)), + let (check_pallet_number_res, res) = match definition { + RuntimeDeclaration::Implicit(implicit_def) => ( + check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()), + construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), + ), + RuntimeDeclaration::Explicit(explicit_decl) => ( + check_pallet_number(input_copy.clone().into(), explicit_decl.pallets.len()), + construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl), + ), + RuntimeDeclaration::ExplicitExpanded(explicit_decl) => ( + check_pallet_number(input_copy.into(), explicit_decl.pallets.len()), + construct_runtime_final_expansion(explicit_decl), + ), }; let res = res.unwrap_or_else(|e| e.to_compile_error()); + // We want to provide better error messages to the user and thus, handle the error here + // separately. If there is an error, we print the error and still generate all of the code to + // get in overall less errors for the user. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote! 
{ + #error + + #res + } + } else { + res + }; + let res = expander::Expander::new("construct_runtime") .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 3b6329c650f..6160f8234a3 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -4,88 +4,24 @@ error: The number of pallets exceeds the maximum number of tuple elements. To in 67 | pub struct Runtime | ^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:35:64 - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^^^^^^^^ not found in this scope - | -help: you might be missing a type parameter - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | +++++++++++++ - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:37:25 - | -37 | impl pallet::Config for Runtime {} - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 - | -40 | impl frame_system::Config for Runtime { - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 - | -42 | type RuntimeOrigin = RuntimeOrigin; - | ^^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -42 | type RuntimeOrigin = Self::RuntimeOrigin; - | ++++++ - -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 - | -44 | type RuntimeCall = RuntimeCall; - | ^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -44 | type RuntimeCall = Self::RuntimeCall; - | ++++++ - -error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 - | -50 | type RuntimeEvent = RuntimeEvent; - | ^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -50 | type RuntimeEvent = Self::RuntimeEvent; - | ++++++ - -error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 - | -56 | type PalletInfo = PalletInfo; - | ^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -56 | type PalletInfo = Self::PalletInfo; - | ++++++ -help: consider importing one of these items - | -18 + use frame_benchmarking::__private::traits::PalletInfo; - | -18 + use frame_support::traits::PalletInfo; - | - -error[E0412]: cannot find type `RuntimeTask` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:1 - | -39 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the macro `frame_system::config_preludes::TestDefaultConfig` which comes from the expansion of the macro 
`frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) -help: you might have meant to use the associated type - --> $WORKSPACE/substrate/frame/system/src/lib.rs - | - | type Self::RuntimeTask = (); - | ++++++ +error: recursion limit reached while expanding `frame_support::__private::tt_return!` + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + | +22 | / #[frame_support::pallet] +23 | | mod pallet { +24 | | #[pallet::config] +25 | | pub trait Config: frame_system::Config {} +... | +66 | |/ construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +... || +180 | || } +181 | || } + | ||_^ + | |_| + | in this macro invocation + | + = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -- GitLab From 6c5a42a690f96d59dbdf94640188f5d5b5cc47e2 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 26 Feb 2024 12:45:30 +0100 Subject: [PATCH 02/86] Introduce Notification block pinning limit (#2935) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While investigating some pruning issues I found some room for improvement in the notification pin handling. **Problem:** It was not possible to define an upper limit on notification pins. The block pinning cache has a limit, but only handles bodies and justifications. After this PR, bookkeeping for notifications is managed in the pinning worker. A limit can be defined in the worker. If that limit is crossed, blocks that were pinned for that notification are unpinned, which now affects the state as well as bodies and justifications. The pinned blocks cache still has a limit, but should never be hit. 
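
To make the mechanism above concrete, here is a rough, self-contained sketch of the bookkeeping idea. It is not the code added by this patch (the real worker, added below in `substrate/client/service/src/client/notification_pinning.rs`, is built on `schnellru::LruMap` and the actual client backend); the `Backend` trait, the `u64` block "hashes" and the `Worker`/`announce_pin`/`unpin` names here are illustrative stand-ins. The point it shows: every notification announces a pin, every dropped notification sends one unpin, and crossing the limit force-unpins the oldest tracked block.

```rust
use std::collections::{HashMap, VecDeque};

/// Stand-in for the client backend that actually keeps blocks pinned.
trait Backend {
    fn unpin_block(&mut self, hash: u64);
}

/// Simplified model of the notification-pinning worker.
struct Worker<B: Backend> {
    limit: usize,
    backend: B,
    /// Number of live notifications per pinned block.
    refs: HashMap<u64, u32>,
    /// Insertion order, used to pick the block to evict.
    order: VecDeque<u64>,
}

impl<B: Backend> Worker<B> {
    fn new(limit: usize, backend: B) -> Self {
        Self { limit, backend, refs: HashMap::new(), order: VecDeque::new() }
    }

    /// A new import/finality notification for `hash` was created.
    fn announce_pin(&mut self, hash: u64) {
        if !self.refs.contains_key(&hash) {
            self.order.push_back(hash);
        }
        *self.refs.entry(hash).or_insert(0) += 1;

        // Limit crossed: forget the oldest tracked block and undo all of its pins.
        if self.order.len() > self.limit {
            if let Some(oldest) = self.order.pop_front() {
                if let Some(count) = self.refs.remove(&oldest) {
                    for _ in 0..count {
                        self.backend.unpin_block(oldest);
                    }
                }
            }
        }
    }

    /// A notification for `hash` was dropped.
    fn unpin(&mut self, hash: u64) {
        let remaining = match self.refs.get_mut(&hash) {
            Some(count) => {
                *count -= 1;
                *count
            },
            // Unpin for a block we no longer track (e.g. already evicted): ignore.
            None => return,
        };
        if remaining == 0 {
            self.refs.remove(&hash);
            self.order.retain(|h| *h != hash);
        }
        self.backend.unpin_block(hash);
    }
}

struct LoggingBackend;

impl Backend for LoggingBackend {
    fn unpin_block(&mut self, hash: u64) {
        println!("backend: unpin block {hash}");
    }
}

fn main() {
    let mut worker = Worker::new(2, LoggingBackend);
    worker.announce_pin(1);
    worker.announce_pin(2);
    worker.announce_pin(3); // limit of 2 crossed: block 1 is force-unpinned
    worker.unpin(2); // the notification for block 2 was dropped
}
```

In the patch itself this bookkeeping is expressed as a custom `schnellru` length limiter (`UnpinningByLengthLimiter`), so the forced unpinning happens in the limiter's `on_removed` hook rather than in the worker loop.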
closes #19 --------- Co-authored-by: Bastian KΓΆcher Co-authored-by: AndrΓ© Silva <123550+andresilva@users.noreply.github.com> --- Cargo.lock | 1 + substrate/client/api/src/client.rs | 27 +- substrate/client/db/src/lib.rs | 2 +- .../client/db/src/pinned_blocks_cache.rs | 2 +- substrate/client/service/Cargo.toml | 1 + substrate/client/service/src/client/client.rs | 53 +-- substrate/client/service/src/client/mod.rs | 1 + .../src/client/notification_pinning.rs | 353 ++++++++++++++++++ 8 files changed, 406 insertions(+), 34 deletions(-) create mode 100644 substrate/client/service/src/client/notification_pinning.rs diff --git a/Cargo.lock b/Cargo.lock index fae9eacdab9..410b45d0018 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16729,6 +16729,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "schnellru", "serde", "serde_json", "sp-api", diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 46232c74539..2de09840e4d 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -278,7 +278,7 @@ impl fmt::Display for UsageInfo { pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, } impl Debug for UnpinHandleInner { @@ -291,7 +291,7 @@ impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, unpin_worker_sender } } @@ -299,12 +299,25 @@ impl UnpinHandleInner { impl Drop for UnpinHandleInner { fn drop(&mut self) { - if let Err(err) = self.unpin_worker_sender.unbounded_send(self.hash) { + if let Err(err) = + self.unpin_worker_sender.unbounded_send(UnpinWorkerMessage::Unpin(self.hash)) + { log::debug!(target: "db", "Unable to unpin block with hash: {}, error: {:?}", self.hash, err); }; } } +/// Message that signals notification-based pinning actions to the pinning-worker. +/// +/// When the notification is dropped, an `Unpin` message should be sent to the worker. +#[derive(Debug)] +pub enum UnpinWorkerMessage { + /// Should be sent when a import or finality notification is created. + AnnouncePin(Block::Hash), + /// Should be sent when a import or finality notification is dropped. + Unpin(Block::Hash), +} + /// Keeps a specific block pinned while the handle is alive. /// Once the last handle instance for a given block is dropped, the /// block is unpinned in the [`Backend`](crate::backend::Backend::unpin_block). @@ -315,7 +328,7 @@ impl UnpinHandle { /// Create a new [`UnpinHandle`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> UnpinHandle { UnpinHandle(Arc::new(UnpinHandleInner::new(hash, unpin_worker_sender))) } @@ -353,7 +366,7 @@ impl BlockImportNotification { header: Block::Header, is_new_best: bool, tree_route: Option>>, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, @@ -412,7 +425,7 @@ impl FinalityNotification { /// Create finality notification from finality summary. 
pub fn from_summary( mut summary: FinalizeSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> FinalityNotification { let hash = summary.finalized.pop().unwrap_or_default(); FinalityNotification { @@ -436,7 +449,7 @@ impl BlockImportNotification { /// Create finality notification from finality summary. pub fn from_summary( summary: ImportSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> BlockImportNotification { let hash = summary.hash; BlockImportNotification { diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 870b4150b9d..0faa90dfc4f 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2531,7 +2531,7 @@ impl sc_client_api::backend::Backend for Backend { self.storage.state_db.pin(&hash, number.saturated_into::(), hint).map_err( |_| { sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for `{:?}`", + "Unable to pin: state already discarded for `{:?}`", hash )) }, diff --git a/substrate/client/db/src/pinned_blocks_cache.rs b/substrate/client/db/src/pinned_blocks_cache.rs index 46c9287fb19..ac4aad07765 100644 --- a/substrate/client/db/src/pinned_blocks_cache.rs +++ b/substrate/client/db/src/pinned_blocks_cache.rs @@ -20,7 +20,7 @@ use schnellru::{Limiter, LruMap}; use sp_runtime::{traits::Block as BlockT, Justifications}; const LOG_TARGET: &str = "db::pin"; -const PINNING_CACHE_SIZE: usize = 1024; +const PINNING_CACHE_SIZE: usize = 2048; /// Entry for pinned blocks cache. struct PinnedBlockCacheEntry { diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 73edceb2ef3..bbf67d1fbd0 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -84,6 +84,7 @@ tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "tim tempfile = "3.1.0" directories = "5.0.1" static_init = "1.0.3" +schnellru = "0.2.1" [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 108c258595a..35e8b53a09c 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -19,8 +19,8 @@ //! Substrate Client use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; -use futures::{FutureExt, StreamExt}; -use log::{error, info, trace, warn}; +use crate::client::notification_pinning::NotificationPinningWorker; +use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; use rand::Rng; @@ -38,7 +38,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter, - ProofProvider, UsageProvider, + ProofProvider, UnpinWorkerMessage, UsageProvider, }; use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, @@ -114,7 +114,7 @@ where block_rules: BlockRules, config: ClientConfig, telemetry: Option, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, _phantom: PhantomData, } @@ -326,19 +326,35 @@ where // dropped, the block will be unpinned automatically. 
if let Some(ref notification) = finality_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for finality notification. hash: {}, Error: {}", notification.hash, err ); - }; + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!( + "Unable to send AnnouncePin worker message for finality: {e}" + ) + }); + } } if let Some(ref notification) = import_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for import notification. hash: {}, Error: {}", notification.hash, err ); + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!("Unable to send AnnouncePin worker message for import: {e}") + }); }; } @@ -416,25 +432,12 @@ where backend.commit_operation(op)?; } - let (unpin_worker_sender, mut rx) = - tracing_unbounded::("unpin-worker-channel", 10_000); - let task_backend = Arc::downgrade(&backend); - spawn_handle.spawn( - "unpin-worker", - None, - async move { - while let Some(message) = rx.next().await { - if let Some(backend) = task_backend.upgrade() { - backend.unpin_block(message); - } else { - log::debug!("Terminating unpin-worker, backend reference was dropped."); - return - } - } - log::debug!("Terminating unpin-worker, stream terminated.") - } - .boxed(), + let (unpin_worker_sender, rx) = tracing_unbounded::>( + "notification-pinning-worker-channel", + 10_000, ); + let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); + spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); Ok(Client { backend, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index a13fd4317e1..0703cc2b47d 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,6 +47,7 @@ mod block_rules; mod call_executor; mod client; +mod notification_pinning; mod wasm_override; mod wasm_substitutes; diff --git a/substrate/client/service/src/client/notification_pinning.rs b/substrate/client/service/src/client/notification_pinning.rs new file mode 100644 index 00000000000..80de91c02f1 --- /dev/null +++ b/substrate/client/service/src/client/notification_pinning.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Notification pinning related logic. +//! +//! This file contains a worker that should be started when a new client instance is created. +//! The goal is to avoid pruning of blocks that have active notifications in the node. Every +//! recipient of notifications should receive the chance to act upon them. 
In addition, notification +//! listeners can hold onto a [`sc_client_api::UnpinHandle`] to keep a block pinned. Once the handle +//! is dropped, a message is sent and the worker unpins the respective block. +use std::{ + marker::PhantomData, + sync::{Arc, Weak}, +}; + +use futures::StreamExt; +use sc_client_api::{Backend, UnpinWorkerMessage}; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use schnellru::Limiter; +use sp_runtime::traits::Block as BlockT; + +const LOG_TARGET: &str = "db::notification_pinning"; +const NOTIFICATION_PINNING_LIMIT: usize = 1024; + +/// A limiter which automatically unpins blocks that leave the data structure. +#[derive(Clone, Debug)] +struct UnpinningByLengthLimiter> { + max_length: usize, + backend: Weak, + _phantom: PhantomData, +} + +impl> UnpinningByLengthLimiter { + /// Creates a new length limiter with a given `max_length`. + pub fn new(max_length: usize, backend: Weak) -> UnpinningByLengthLimiter { + UnpinningByLengthLimiter { max_length, backend, _phantom: PhantomData::::default() } + } +} + +impl> Limiter + for UnpinningByLengthLimiter +{ + type KeyToInsert<'a> = Block::Hash; + type LinkType = usize; + + fn is_over_the_limit(&self, length: usize) -> bool { + length > self.max_length + } + + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + value: u32, + ) -> Option<(Block::Hash, u32)> { + log::debug!(target: LOG_TARGET, "Pinning block based on notification. hash = {key}"); + if self.max_length > 0 { + Some((key, value)) + } else { + None + } + } + + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut Block::Hash, + _new_key: Block::Hash, + _old_value: &mut u32, + _new_value: &mut u32, + ) -> bool { + true + } + + fn on_removed(&mut self, key: &mut Block::Hash, references: &mut u32) { + // If reference count was larger than 0 on removal, + // the item was removed due to capacity limitations. + // Since the cache should be large enough for pinned items, + // we want to know about these evictions. + if *references > 0 { + log::warn!( + target: LOG_TARGET, + "Notification block pinning limit reached. Unpinning block with hash = {key:?}" + ); + if let Some(backend) = self.backend.upgrade() { + (0..*references).for_each(|_| backend.unpin_block(*key)); + } + } else { + log::trace!( + target: LOG_TARGET, + "Unpinned block. hash = {key:?}", + ) + } + } + + fn on_cleared(&mut self) {} + + fn on_grow(&mut self, _new_memory_usage: usize) -> bool { + true + } +} + +/// Worker for the handling of notification pinning. +/// +/// It receives messages from a receiver and pins/unpins based on the incoming messages. +/// All notification related unpinning should go through this worker. If the maximum number of +/// notification pins is reached, the block from the oldest notification is unpinned. +pub struct NotificationPinningWorker> { + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Weak, + pinned_blocks: schnellru::LruMap>, +} + +impl> NotificationPinningWorker { + /// Creates a new `NotificationPinningWorker`. 
+ pub fn new( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new( + NOTIFICATION_PINNING_LIMIT, + Arc::downgrade(&task_backend), + ), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn handle_announce_message(&mut self, hash: Block::Hash) { + if let Some(entry) = self.pinned_blocks.get_or_insert(hash, Default::default) { + *entry = *entry + 1; + } + } + + fn handle_unpin_message(&mut self, hash: Block::Hash) -> Result<(), ()> { + if let Some(refcount) = self.pinned_blocks.peek_mut(&hash) { + *refcount = *refcount - 1; + if *refcount == 0 { + self.pinned_blocks.remove(&hash); + } + if let Some(backend) = self.task_backend.upgrade() { + log::debug!(target: LOG_TARGET, "Reducing pinning refcount for block hash = {hash:?}"); + backend.unpin_block(hash); + } else { + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, backend reference was dropped."); + return Err(()) + } + } else { + log::debug!(target: LOG_TARGET, "Received unpin message for already unpinned block. hash = {hash:?}"); + } + Ok(()) + } + + /// Start working on the received messages. + /// + /// The worker maintains a map which keeps track of the pinned blocks and their reference count. + /// Depending upon the received message, it acts to pin/unpin the block. + pub async fn run(mut self) { + while let Some(message) = self.unpin_message_rx.next().await { + match message { + UnpinWorkerMessage::AnnouncePin(hash) => self.handle_announce_message(hash), + UnpinWorkerMessage::Unpin(hash) => + if self.handle_unpin_message(hash).is_err() { + return + }, + } + } + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, stream terminated.") + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use sc_client_api::{Backend, UnpinWorkerMessage}; + use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; + use sp_core::H256; + use sp_runtime::traits::Block as BlockT; + + type Block = substrate_test_runtime_client::runtime::Block; + + use super::{NotificationPinningWorker, UnpinningByLengthLimiter}; + + impl> NotificationPinningWorker { + fn new_with_limit( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + limit: usize, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new(limit, Arc::downgrade(&task_backend)), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn lru( + &self, + ) -> &schnellru::LruMap> { + &self.pinned_blocks + } + } + + #[test] + fn pinning_worker_handles_base_case() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + + // Block got pinned and unpin message should unpin in the backend. 
+ let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + } + + #[test] + fn pinning_worker_handles_multiple_pins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block got pinned multiple times. + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + } + + #[test] + fn pinning_worker_handles_too_many_unpins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + let hash2 = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block was announced once but unpinned multiple times. The worker should ignore the + // additional unpins. + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash2); + assert!(worker.lru().is_empty()); + assert_eq!(backend.pin_refs(&hash2), None); + } + + #[test] + fn pinning_worker_should_evict_when_limit_reached() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash1 = H256::random(); + let hash2 = H256::random(); + let hash3 = H256::random(); + let hash4 = H256::random(); + + // Only two items fit into the cache. + let mut worker = NotificationPinningWorker::new_with_limit(rx, backend.clone(), 2); + + // Multiple blocks are announced but the cache size is too small. We expect that blocks + // are evicted by the cache and unpinned in the backend. 
+ let _ = backend.pin_block(hash1); + let _ = backend.pin_block(hash2); + let _ = backend.pin_block(hash3); + assert_eq!(backend.pin_refs(&hash1), Some(1)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + worker.handle_announce_message(hash1); + assert!(worker.lru().peek(&hash1).is_some()); + worker.handle_announce_message(hash2); + assert!(worker.lru().peek(&hash2).is_some()); + worker.handle_announce_message(hash3); + assert!(worker.lru().peek(&hash3).is_some()); + assert!(worker.lru().peek(&hash2).is_some()); + assert_eq!(worker.lru().len(), 2); + + // Hash 1 should have gotten unpinned, since its oldest. + assert_eq!(backend.pin_refs(&hash1), Some(0)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + // Hash 2 is getting bumped. + worker.handle_announce_message(hash2); + assert_eq!(worker.lru().peek(&hash2), Some(&2)); + + // Since hash 2 was accessed, evict hash 3. + worker.handle_announce_message(hash4); + assert_eq!(worker.lru().peek(&hash3), None); + } +} -- GitLab From d05f8c57fb29b3c156e542c5473cbb09fae359bb Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Mon, 26 Feb 2024 21:40:02 +0400 Subject: [PATCH 03/86] Limit max execution time for `test-linux-stable` CI jobs (#3483) Tests run as part of the `test-linux-stable` jobs could hang as shown [here](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/5341393). This PR adds a failsafe for such cases. --- .gitlab/pipeline/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index b5e26d19489..9f774aab271 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -48,6 +48,7 @@ test-linux-stable: - target/nextest/default/junit.xml reports: junit: target/nextest/default/junit.xml + timeout: 90m test-linux-oldkernel-stable: extends: test-linux-stable -- GitLab From 4080632ee05fc3d7734c856b3af3a024d63197ef Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 26 Feb 2024 18:23:37 +0100 Subject: [PATCH 04/86] [prdoc] Validate crate names (#3467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes: - Add CI script to check that the `crate` names that are mentioned in prdocs are valid. We can extend it lateron to also validate the correct SemVer bumps as introduced in https://github.com/paritytech/polkadot-sdk/pull/3441. Example output: ```pre $ python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc πŸ”Ž Reading workspace polkadot-sdk/Cargo.toml. πŸ“¦ Checking 36 prdocs against 494 crates. βœ… All prdocs are valid. ``` Note that not all old prdocs pass the check since crates have been renamed: ```pre $ python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/**/*.prdoc πŸ”Ž Reading workspace polkadot-sdk/Cargo.toml. πŸ“¦ Checking 186 prdocs against 494 crates. ❌ Some prdocs are invalid. 
πŸ’₯ prdoc/1.4.0/pr_1926.prdoc lists invalid crate: node-cli πŸ’₯ prdoc/1.4.0/pr_2086.prdoc lists invalid crate: xcm-executor πŸ’₯ prdoc/1.4.0/pr_2107.prdoc lists invalid crate: xcm πŸ’₯ prdoc/1.6.0/pr_2684.prdoc lists invalid crate: xcm-builder ``` --------- Signed-off-by: Oliver Tale-Yazdi --- .github/scripts/check-prdoc.py | 71 +++++++++++++++++++++++++++++++ .github/workflows/check-prdoc.yml | 8 ++++ prdoc/1.4.0/pr_1246.prdoc | 2 +- prdoc/1.6.0/pr_2689.prdoc | 2 +- prdoc/1.6.0/pr_2771.prdoc | 2 +- prdoc/pr_3412.prdoc | 2 +- 6 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 .github/scripts/check-prdoc.py diff --git a/.github/scripts/check-prdoc.py b/.github/scripts/check-prdoc.py new file mode 100644 index 00000000000..42b063f2885 --- /dev/null +++ b/.github/scripts/check-prdoc.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +''' +Ensure that the prdoc files are valid. + +# Example + +```sh +python3 -m pip install cargo-workspace +python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc +``` + +Produces example output: +```pre +πŸ”Ž Reading workspace polkadot-sdk/Cargo.toml +πŸ“¦ Checking 32 prdocs against 493 crates. +βœ… All prdocs are valid +``` +''' + +import os +import yaml +import argparse +import cargo_workspace + +def check_prdoc_crate_names(root, paths): + ''' + Check that all crates of the `crates` section of each prdoc is present in the workspace. + ''' + + print(f'πŸ”Ž Reading workspace {root}.') + workspace = cargo_workspace.Workspace.from_path(root) + crate_names = [crate.name for crate in workspace.crates] + + print(f'πŸ“¦ Checking {len(paths)} prdocs against {len(crate_names)} crates.') + faulty = {} + + for path in paths: + with open(path, 'r') as f: + prdoc = yaml.safe_load(f) + + for crate in prdoc.get('crates', []): + crate = crate['name'] + if crate in crate_names: + continue + + faulty.setdefault(path, []).append(crate) + + if len(faulty) == 0: + print('βœ… All prdocs are valid.') + else: + print('❌ Some prdocs are invalid.') + for path, crates in faulty.items(): + print(f'πŸ’₯ {path} lists invalid crate: {", ".join(crates)}') + exit(1) + +def parse_args(): + parser = argparse.ArgumentParser(description='Check prdoc files') + parser.add_argument('root', help='The cargo workspace manifest', metavar='root', type=str, nargs=1) + parser.add_argument('prdoc', help='The prdoc files', metavar='prdoc', type=str, nargs='*') + args = parser.parse_args() + + if len(args.prdoc) == 0: + print('❌ Need at least one prdoc file as argument.') + exit(1) + + return { 'root': os.path.abspath(args.root[0]), 'prdocs': args.prdoc } + +if __name__ == '__main__': + args = parse_args() + check_prdoc_crate_names(args['root'], args['prdocs']) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 91701ca036e..c31dee06ec5 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -57,3 +57,11 @@ jobs: echo "Checking for PR#${GITHUB_PR}" echo "You can find more information about PRDoc at $PRDOC_DOC" $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR} + + - name: Validate prdoc for PR#${{ github.event.pull_request.number }} + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "Validating PR#${GITHUB_PR}" + python3 --version + python3 -m pip install cargo-workspace==1.2.1 + python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/pr_${GITHUB_PR}.prdoc diff --git a/prdoc/1.4.0/pr_1246.prdoc b/prdoc/1.4.0/pr_1246.prdoc index a4d270c45cb..3b5c2017f22 100644 --- 
a/prdoc/1.4.0/pr_1246.prdoc +++ b/prdoc/1.4.0/pr_1246.prdoc @@ -11,7 +11,7 @@ migrations: description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off." crates: - - name: cumulus_pallet_xcmp_queue + - name: cumulus-pallet-xcmp-queue note: Pallet config must be altered according to the MR description. host_functions: [] diff --git a/prdoc/1.6.0/pr_2689.prdoc b/prdoc/1.6.0/pr_2689.prdoc index 847c3e8026c..5d3081e3a4c 100644 --- a/prdoc/1.6.0/pr_2689.prdoc +++ b/prdoc/1.6.0/pr_2689.prdoc @@ -1,7 +1,7 @@ # Schema: Parity PR Documentation Schema (prdoc) # See doc at https://github.com/paritytech/prdoc -title: BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators +title: "BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators" doc: - audience: Node Operator diff --git a/prdoc/1.6.0/pr_2771.prdoc b/prdoc/1.6.0/pr_2771.prdoc index 1b49162e439..50fb99556ec 100644 --- a/prdoc/1.6.0/pr_2771.prdoc +++ b/prdoc/1.6.0/pr_2771.prdoc @@ -6,4 +6,4 @@ doc: Enable better req-response protocol versioning, by allowing for fallback requests on different protocols. crates: - - name: sc_network + - name: sc-network diff --git a/prdoc/pr_3412.prdoc b/prdoc/pr_3412.prdoc index d68f05e45dc..1ee6edfeb83 100644 --- a/prdoc/pr_3412.prdoc +++ b/prdoc/pr_3412.prdoc @@ -13,5 +13,5 @@ doc: crates: - name: pallet-babe - - name: pallet-aura-ext + - name: cumulus-pallet-aura-ext - name: pallet-session -- GitLab From 81d447a8feca14222fef57ffa721302a8b808562 Mon Sep 17 00:00:00 2001 From: philoniare Date: Tue, 27 Feb 2024 04:40:13 +0800 Subject: [PATCH 05/86] Cleanup String::from_utf8 (#3446) # Description *Refactors `String::from_utf8` usage in the pallet benchmarking Fixes #389 --------- Co-authored-by: command-bot <> --- .../benchmarking-cli/src/pallet/command.rs | 50 ++++++++++--------- .../benchmarking-cli/src/pallet/writer.rs | 46 +++++++---------- 2 files changed, 44 insertions(+), 52 deletions(-) diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index d5dc6226ab9..c6ba2824016 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -298,16 +298,23 @@ impl PalletCmd { // Convert `Vec` to `String` for better readability. 
let benchmarks_to_run: Vec<_> = benchmarks_to_run .into_iter() - .map(|b| { + .map(|(pallet, extrinsic, components, pov_modes)| { + let pallet_name = + String::from_utf8(pallet.clone()).expect("Encoded from String; qed"); + let extrinsic_name = + String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"); ( - b.0, - b.1, - b.2, - b.3.into_iter() + pallet, + extrinsic, + components, + pov_modes + .into_iter() .map(|(p, s)| { (String::from_utf8(p).unwrap(), String::from_utf8(s).unwrap()) }) .collect(), + pallet_name, + extrinsic_name, ) }) .collect(); @@ -329,12 +336,12 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; - for (pallet, extrinsic, components, _) in benchmarks_to_run.clone() { + for (pallet, extrinsic, components, _, pallet_name, extrinsic_name) in + benchmarks_to_run.clone() + { log::info!( target: LOG_TARGET, - "Starting benchmark: {}::{}", - String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), + "Starting benchmark: {pallet_name}::{extrinsic_name}" ); let all_components = if components.is_empty() { vec![Default::default()] @@ -415,12 +422,7 @@ impl PalletCmd { ) .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))? .map_err(|e| { - format!( - "Benchmark {}::{} failed: {}", - String::from_utf8_lossy(&pallet), - String::from_utf8_lossy(&extrinsic), - e - ) + format!("Benchmark {pallet_name}::{extrinsic_name} failed: {e}",) })?; } // Do one loop of DB tracking. @@ -494,11 +496,7 @@ impl PalletCmd { log::info!( target: LOG_TARGET, - "Running benchmark: {}.{}({} args) {}/{} {}/{}", - String::from_utf8(pallet.clone()) - .expect("Encoded from String; qed"), - String::from_utf8(extrinsic.clone()) - .expect("Encoded from String; qed"), + "Running benchmark: {pallet_name}::{extrinsic_name}({} args) {}/{} {}/{}", components.len(), s + 1, // s starts at 0. all_components.len(), @@ -707,12 +705,14 @@ impl PalletCmd { Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, ) -> Result { use std::collections::hash_map::Entry; let mut parsed = PovModesMap::new(); - for (pallet, call, _components, pov_modes) in benchmarks { + for (pallet, call, _components, pov_modes, _, _) in benchmarks { for (pallet_storage, mode) in pov_modes { let mode = PovEstimationMode::from_str(&mode)?; let splits = pallet_storage.split("::").collect::>(); @@ -766,6 +766,8 @@ fn list_benchmark( Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, list_output: ListOutput, no_csv_header: bool, @@ -773,11 +775,11 @@ fn list_benchmark( let mut benchmarks = BTreeMap::new(); // Sort and de-dub by pallet and function name. 
- benchmarks_to_run.iter().for_each(|(pallet, extrinsic, _, _)| { + benchmarks_to_run.iter().for_each(|(_, _, _, _, pallet_name, extrinsic_name)| { benchmarks - .entry(String::from_utf8_lossy(pallet).to_string()) + .entry(pallet_name) .or_insert_with(BTreeSet::new) - .insert(String::from_utf8_lossy(extrinsic).to_string()); + .insert(extrinsic_name); }); match list_output { diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index 9493a693bbe..bd4b65d8a2e 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -153,8 +153,8 @@ fn map_results( continue } - let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); + let pallet_name = String::from_utf8(batch.pallet.clone()).unwrap(); + let instance_name = String::from_utf8(batch.instance.clone()).unwrap(); let benchmark_data = get_benchmark_data( batch, storage_info, @@ -166,7 +166,7 @@ fn map_results( worst_case_map_values, additional_trie_layers, ); - let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); + let pallet_benchmarks = all_benchmarks.entry((pallet_name, instance_name)).or_default(); pallet_benchmarks.push(benchmark_data); } Ok(all_benchmarks) @@ -571,19 +571,22 @@ pub(crate) fn process_storage_results( let mut prefix_result = result.clone(); let key_info = storage_info_map.get(&prefix); + let pallet_name = match key_info { + Some(k) => String::from_utf8(k.pallet_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; + let storage_name = match key_info { + Some(k) => String::from_utf8(k.storage_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; let max_size = key_info.and_then(|k| k.max_size); let override_pov_mode = match key_info { - Some(StorageInfo { pallet_name, storage_name, .. }) => { - let pallet_name = - String::from_utf8(pallet_name.clone()).expect("encoded from string"); - let storage_name = - String::from_utf8(storage_name.clone()).expect("encoded from string"); - + Some(_) => { // Is there an override for the storage key? - pov_modes.get(&(pallet_name.clone(), storage_name)).or( + pov_modes.get(&(pallet_name.clone(), storage_name.clone())).or( // .. or for the storage prefix? - pov_modes.get(&(pallet_name, "ALL".to_string())).or( + pov_modes.get(&(pallet_name.clone(), "ALL".to_string())).or( // .. or for the benchmark? pov_modes.get(&("ALL".to_string(), "ALL".to_string())), ), @@ -662,15 +665,10 @@ pub(crate) fn process_storage_results( // writes. 
if !is_prefix_identified { match key_info { - Some(key_info) => { + Some(_) => { let comment = format!( "Storage: `{}::{}` (r:{} w:{})", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), - reads, - writes, + pallet_name, storage_name, reads, writes, ); comments.push(comment) }, @@ -698,11 +696,7 @@ pub(crate) fn process_storage_results( ) { Some(new_pov) => { let comment = format!( - "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", - String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"), + "Proof: `{pallet_name}::{storage_name}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)", key_info.max_values, key_info.max_size, new_pov, @@ -711,13 +705,9 @@ pub(crate) fn process_storage_results( comments.push(comment) }, None => { - let pallet = String::from_utf8(key_info.pallet_name.clone()) - .expect("encoded from string"); - let item = String::from_utf8(key_info.storage_name.clone()) - .expect("encoded from string"); let comment = format!( "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)", - pallet, item, key_info.max_values, key_info.max_size, + pallet_name, storage_name, key_info.max_values, key_info.max_size, used_pov_mode, ); comments.push(comment); -- GitLab From f5de090aa56d0801f043c2b13892191b3d0c9f50 Mon Sep 17 00:00:00 2001 From: Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Tue, 27 Feb 2024 08:24:07 -0300 Subject: [PATCH 06/86] fix(zombienet): increase timeout in download artifacts (#3376) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix timeouts downloading the artifacts (e.g https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/5246040) Thx! --------- Co-authored-by: Bastian KΓΆcher --- polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl index db0a60ac1df..5fe1b2ad2f1 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl @@ -10,9 +10,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds # POLKADOT_PR_ARTIFACTS_URL=https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/1842869/artifacts/raw/artifacts # with the version of polkadot you want to download. -# avg 30s in our infra -alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds -bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds +alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds +bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds # update the cmd to add the flag '--insecure-validator-i-know-what-i-do' # once the base image include the version with this flag we can remove this logic. 
alice: run ./0002-update-cmd.sh within 60 seconds -- GitLab From 94c54d50322076e78a687785594eb84c78b823d7 Mon Sep 17 00:00:00 2001 From: Clara van Staden Date: Tue, 27 Feb 2024 14:49:31 +0200 Subject: [PATCH 07/86] Snowbridge benchmark tests fix (#3424) When running `cargo test -p bridge-hub-rococo-runtime --features runtime-benchmarks`, two of the Snowbridge benchmark tests fails. The reason is that when the runtime-benchmarks feature is enabled, the `NoopMessageProcessor` message processor is used. The Snowbridge tests rely on the outbound messages to be processed using the message queue, so that we can check the expected nonce and block digest logs. This PR changes the conditional compilation to only use `NoopMessageProcessor` when compiling the executable to run benchmarks against, not when running tests. --------- Co-authored-by: claravanstaden --- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 15cf045a995..f576776696c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -102,8 +102,6 @@ use polkadot_runtime_common::prod_or_fast; #[cfg(feature = "runtime-benchmarks")] use benchmark_helpers::DoNothingRouter; -#[cfg(not(feature = "runtime-benchmarks"))] -use bridge_hub_common::BridgeHubMessageRouter; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -367,11 +365,15 @@ parameter_types! { impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] + // Use the NoopMessageProcessor exclusively for benchmarks, not for tests with the + // runtime-benchmarks feature as tests require the BridgeHubMessageRouter to process messages. + // The "test" feature flag doesn't work, hence the reliance on the "std" feature, which is + // enabled during tests. 
+ #[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))] type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = BridgeHubMessageRouter< + #[cfg(not(all(not(feature = "std"), feature = "runtime-benchmarks")))] + type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, -- GitLab From 2cdda0e62dd3088d2fd09cea627059674070c277 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 27 Feb 2024 14:16:23 +0100 Subject: [PATCH 08/86] Bridge zombienet tests: Check amount received at destination (#3490) Related to https://github.com/paritytech/polkadot-sdk/issues/3475 --- .../rococo-westend/bridges_rococo_westend.sh | 12 ++++++++---- .../js-helpers/native-assets-balance-increased.js | 5 +++-- .../0001-asset-transfer/roc-reaches-westend.zndsl | 8 ++++---- .../0001-asset-transfer/wnd-reaches-rococo.zndsl | 8 ++++---- .../0001-asset-transfer/wroc-reaches-rococo.zndsl | 8 ++++---- .../0001-asset-transfer/wwnd-reaches-westend.zndsl | 8 ++++---- 6 files changed, 27 insertions(+), 22 deletions(-) diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index de5be2e0af8..479ab833abf 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -319,6 +319,7 @@ case "$1" in $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send ROCs to Alice account on AHW limited_reserve_transfer_assets \ @@ -326,11 +327,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedWNDs to Alice account on AHW limited_reserve_transfer_assets \ @@ -338,11 +340,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; reserve-transfer-assets-from-asset-hub-westend-local) + amount=$2 
ensure_polkadot_js_api # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ @@ -350,11 +353,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedROCs to Alice account on AHR limited_reserve_transfer_assets \ @@ -362,7 +366,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js index a35c753d973..749c3e2fec3 100644 --- a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js +++ b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js @@ -3,12 +3,13 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); const accountAddress = args[0]; + const expectedIncrease = BigInt(args[1]); const initialAccountData = await api.query.system.account(accountAddress); const initialAccountBalance = initialAccountData.data['free']; while (true) { const accountData = await api.query.system.account(accountAddress); const accountBalance = accountData.data['free']; - if (accountBalance > initialAccountBalance) { + if (accountBalance > initialAccountBalance + expectedIncrease) { return accountBalance; } @@ -17,4 +18,4 @@ async function run(nodeName, networkInfo, args) { } } -module.exports = { run } +module.exports = {run} diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 4725362ae82..a58520ccea6 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with 
"reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds +# send 5 ROC to //Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds -# check that //Alice received the ROC on Westend AH -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds +# check that //Alice received at least 4.8 ROC on Westend AH +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 300 seconds # check that the relayer //Charlie is rewarded by Westend AH bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index 77267239c3b..fedb78cc210 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config -# send WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds +# send 5 WND to //Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds -# check that //Alice received the WND on Rococo AH -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds +# check that //Alice received at least 4.8 WND on Rococo AH +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 300 seconds # check that the relayer //Charlie is rewarded by Rococo AH bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl index f72b76e6026..68b888b6858 100644 --- a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl @@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wROC back to Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds +# send 3 wROC back to Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local 3000000000000" within 120 
seconds -# check that //Alice received the wROC on Rococo AH +# check that //Alice received at least 2.8 wROC on Rococo AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl index 9d893c8d90a..1a8a1618195 100644 --- a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl @@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wWND back to Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds +# send 3 wWND back to Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local 3000000000000" within 120 seconds -# check that //Alice received the wWND on Westend AH +# check that //Alice received at least 2.8 wWND on Westend AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds -- GitLab From 29369a4e7ca92d38a305a3a3115609718785eba9 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 27 Feb 2024 14:50:21 +0000 Subject: [PATCH 09/86] Add documentation around FRAME Origin (#3362) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Does the following: - Add a reference doc page named `frame_runtime_types`, which explains what types like `RuntimeOrigin`, `RuntimeCall` etc are. - On top of it, it adds a reference doc page called `frame_origin` which explains a few important patterns that we use around origins - And finally brushes up `#[frame::origin]` docs. 
- Updates the theme, sidebar and favicon to look like: Screenshot 2024-02-20 at 12 16 00 All of this was inspired by https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin/10993 closes https://github.com/paritytech/polkadot-sdk-docs/issues/45 closes https://github.com/paritytech/polkadot-sdk-docs/issues/43 contributes / overlaps with https://github.com/paritytech/polkadot-sdk/pull/2638 cc @liamaharon deprecation companion: https://github.com/substrate-developer-hub/substrate-docs/pull/2131 pba-content companion: https://github.com/Polkadot-Blockchain-Academy/pba-content/pull/977 --------- Co-authored-by: Radha <86818441+DrW3RK@users.noreply.github.com> Co-authored-by: Sebastian Kunert Co-authored-by: GonΓ§alo Pestana Co-authored-by: Liam Aharon --- .gitlab/pipeline/build.yml | 2 +- Cargo.lock | 9 + docs/mermaid/IA.mmd | 2 +- docs/mermaid/outer_runtime_types.mmd | 3 + docs/sdk/Cargo.toml | 20 +- docs/sdk/headers/header.html | 139 ++++++++ docs/sdk/headers/theme.css | 16 + docs/sdk/headers/toc.html | 54 ---- docs/sdk/src/guides/your_first_pallet/mod.rs | 9 +- docs/sdk/src/lib.rs | 7 +- docs/sdk/src/meta_contributing.rs | 6 +- .../src/reference_docs/extrinsic_encoding.rs | 2 +- .../reference_docs/frame_composite_enums.rs | 1 - docs/sdk/src/reference_docs/frame_origin.rs | 270 +++++++++++++++- .../src/reference_docs/frame_runtime_types.rs | 306 ++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 8 +- substrate/frame/src/lib.rs | 6 + .../src/construct_runtime/expand/origin.rs | 2 +- substrate/frame/support/procedural/src/lib.rs | 17 +- substrate/frame/support/src/lib.rs | 68 +++- substrate/frame/system/src/lib.rs | 1 + 21 files changed, 839 insertions(+), 109 deletions(-) create mode 100644 docs/mermaid/outer_runtime_types.mmd create mode 100644 docs/sdk/headers/header.html create mode 100644 docs/sdk/headers/theme.css delete mode 100644 docs/sdk/headers/toc.html delete mode 100644 docs/sdk/src/reference_docs/frame_composite_enums.rs create mode 100644 docs/sdk/src/reference_docs/frame_runtime_types.rs diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 15b4869997b..423587c1fb5 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -91,7 +91,7 @@ build-rustdoc: - .run-immediately variables: SKIP_WASM_BUILD: 1 - RUSTDOCFLAGS: "" + RUSTDOCFLAGS: "--default-theme=ayu --html-in-header ./docs/sdk/headers/header.html --extend-css ./docs/sdk/headers/theme.css" artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success diff --git a/Cargo.lock b/Cargo.lock index 410b45d0018..c52feafe7a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13384,11 +13384,20 @@ dependencies = [ "cumulus-pallet-parachain-system", "docify", "frame", + "frame-system", "kitchensink-runtime", "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collective", "pallet-default-config-example", + "pallet-democracy", "pallet-examples", + "pallet-multisig", + "pallet-proxy", "pallet-timestamp", + "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "sc-cli", "sc-client-db", diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 93d3e92814c..4eb50bcf96a 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -3,7 +3,7 @@ flowchart devhub --> polkadot_sdk devhub --> reference_docs - devhub --> tutorial + devhub --> guides polkadot_sdk --> substrate polkadot_sdk --> frame diff --git a/docs/mermaid/outer_runtime_types.mmd b/docs/mermaid/outer_runtime_types.mmd new file 
mode 100644 index 00000000000..c909df16af1 --- /dev/null +++ b/docs/mermaid/outer_runtime_types.mmd @@ -0,0 +1,3 @@ +flowchart LR + RuntimeCall --"TryInto"--> PalletCall + PalletCall --"Into"--> RuntimeCall diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 05aced8751a..576a81834f8 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -17,7 +17,10 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +frame = { path = "../../substrate/frame", features = [ + "experimental", + "runtime", +] } pallet-examples = { path = "../../substrate/frame/examples" } pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } @@ -52,7 +55,18 @@ cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-syst ] } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } pallet-aura = { path = "../../substrate/frame/aura", default-features = false } + +# Pallets and FRAME internals pallet-timestamp = { path = "../../substrate/frame/timestamp" } +pallet-balances = { path = "../../substrate/frame/balances" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-utility = { path = "../../substrate/frame/utility" } +pallet-multisig = { path = "../../substrate/frame/multisig" } +pallet-proxy = { path = "../../substrate/frame/proxy" } +pallet-authorship = { path = "../../substrate/frame/authorship" } +pallet-collective = { path = "../../substrate/frame/collective" } +pallet-democracy = { path = "../../substrate/frame/democracy" } +frame-system = { path = "../../substrate/frame/system" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -64,9 +78,5 @@ sp-runtime = { path = "../../substrate/primitives/runtime" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -[dev-dependencies] -parity-scale-codec = "3.6.5" -scale-info = "2.9.0" - [features] experimental = ["pallet-aura/experimental"] diff --git a/docs/sdk/headers/header.html b/docs/sdk/headers/header.html new file mode 100644 index 00000000000..68dc5d5509e --- /dev/null +++ b/docs/sdk/headers/header.html @@ -0,0 +1,139 @@ + + + diff --git a/docs/sdk/headers/theme.css b/docs/sdk/headers/theme.css new file mode 100644 index 00000000000..bb9254ec4a8 --- /dev/null +++ b/docs/sdk/headers/theme.css @@ -0,0 +1,16 @@ +:root { + --polkadot-pink: #E6007A ; + --polkadot-green: #56F39A ; + --polkadot-lime: #D3FF33 ; + --polkadot-cyan: #00B2FF ; + --polkadot-purple: #552BBF ; + } + +body > nav.sidebar > div.sidebar-crate > a > img { + /* logo width, given that the sidebar width is 200px; */ + width: 190px; +} + +body nav.sidebar { + flex: 0 0 250px; +} diff --git a/docs/sdk/headers/toc.html b/docs/sdk/headers/toc.html deleted file mode 100644 index a4a074cb4f3..00000000000 --- a/docs/sdk/headers/toc.html +++ /dev/null @@ -1,54 +0,0 @@ - - diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 29cdda36ed1..20a8639b270 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -128,8 +128,8 @@ //! //! Recall that within our pallet, (almost) all blocks of code are generic over ``. And, //! 
because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or -//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust -//! traits and generics work. If unfamiliar with this pattern, read +//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how +//! Rust traits and generics work. If unfamiliar with this pattern, read //! [`crate::reference_docs::trait_based_programming`] before going further. //! //! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct` @@ -270,7 +270,7 @@ #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in -//! > [`crate::reference_docs::frame_composite_enums`]. +//! > [`crate::reference_docs::frame_runtime_types`]. //! //! Then, we can rewrite the `transfer` dispatchable as such: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] @@ -285,7 +285,6 @@ //! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. //! //! -//! //! ## What Next? //! //! The following topics where used in this guide, but not covered in depth. It is suggested to @@ -293,7 +292,7 @@ //! //! - [`crate::reference_docs::safe_defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. -//! - [`crate::reference_docs::frame_composite_enums`]. +//! - [`crate::reference_docs::frame_runtime_types`]. //! - The pallet we wrote in this guide was using `dev_mode`, learn more in //! [`frame::pallet_macros::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index 075d9ddaffe..e211476d251 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -15,7 +15,7 @@ //! - Start by learning about the the [`polkadot_sdk`], its structure and context. //! - Then, head over the [`guides`]. This modules contains in-depth guides about the most important //! user-journeys of the Polkadot SDK. -//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`]. +//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. //! - Finally, is the parent website of this crate that contains the //! list of further tools related to the Polkadot SDK. //! @@ -25,6 +25,11 @@ #![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")] #![warn(rustdoc::broken_intra_doc_links)] #![warn(rustdoc::private_intra_doc_links)] +#![doc(html_favicon_url = "https://polkadot.network/favicon-32x32.png")] +#![doc( + html_logo_url = "https://europe1.discourse-cdn.com/standard21/uploads/polkadot2/original/1X/eb57081e2bb7c39e5fcb1a98b443e423fa4448ae.svg" +)] +#![doc(issue_tracker_base_url = "https://github.com/paritytech/polkadot-sdk/issues")] /// Meta information about this crate, how it is built, what principles dictates its evolution and /// how one can contribute to it. diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index 7ecf8b0adfd..fcdcea9934b 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -101,7 +101,7 @@ //! * Before even getting started, what is with all of this ``? We link to //! [`crate::reference_docs::trait_based_programming`]. //! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is -//! explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this! +//! 
explained in [`crate::reference_docs::frame_runtime_types`]. Build on top of this! //! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`]. //! * Then, what is `DispatchResult`? Why is this called *dispatch*? Probably something that can be //! explained in the documentation of [`frame::prelude::DispatchResult`]. @@ -138,7 +138,9 @@ //! injected, run: //! //! ```sh -//! SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/toc.html" cargo doc -p polkadot-sdk-docs --no-deps --open +//! SKIP_WASM_BUILD=1 \ +//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/header.html --extend-css $(pwd)/docs/sdk/headers/theme.css --default-theme=ayu" \ +//! cargo doc -p polkadot-sdk-docs --no-deps --open //! ``` //! //! If even faster build time for docs is needed, you can temporarily remove most of the diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 9008f8f835f..8c8568a228f 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -127,7 +127,7 @@ //! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME //! pallet being called, and the inner enum represents the call being made within that pallet, and //! any arguments to it. Read more about the call enum -//! [here][crate::reference_docs::frame_composite_enums]. +//! [here][crate::reference_docs::frame_runtime_types]. //! //! FRAME `Call` enums are automatically generated, and end up looking something like this: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] diff --git a/docs/sdk/src/reference_docs/frame_composite_enums.rs b/docs/sdk/src/reference_docs/frame_composite_enums.rs deleted file mode 100644 index 6051cd53446..00000000000 --- a/docs/sdk/src/reference_docs/frame_composite_enums.rs +++ /dev/null @@ -1 +0,0 @@ -//! # FRAME Composite Enums diff --git a/docs/sdk/src/reference_docs/frame_origin.rs b/docs/sdk/src/reference_docs/frame_origin.rs index a4078377cd7..49533157b01 100644 --- a/docs/sdk/src/reference_docs/frame_origin.rs +++ b/docs/sdk/src/reference_docs/frame_origin.rs @@ -1,14 +1,260 @@ //! # FRAME Origin //! -//! Notes: -//! -//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in -//! Protocol Berg 2023 -//! - system's raw origin, how it is amalgamated with other origins into one type -//! [`frame_composite_enums`] -//! - signed origin -//! - unsigned origin, link to [`fee_less_runtime`] -//! - Root origin, how no one can obtain it. -//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of -//! that body or half of the token holders". -//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. +//! Let's start by clarifying a common wrong assumption about Origin: +//! +//! **ORIGIN IS NOT AN ACCOUNT ID**. +//! +//! FRAME's origin abstractions allow you to convey meanings far beyond just an account-id being the +//! caller of an extrinsic. Nonetheless, an account-id having signed an extrinsic is one of the +//! meanings that an origin can convey. This is the commonly used [`frame_system::ensure_signed`], +//! where the return value happens to be an account-id. +//! +//! Instead, let's establish the following as the correct definition of an origin: +//! +//! > The origin type represents the privilege level of the caller of an extrinsic. +//! +//! 
That is, an extrinsic, through checking the origin, can *express what privilege level it wishes +//! to impose on the caller of the extrinsic*. One of those checks can be as simple as "*any account +//! that has signed a statement can pass*". +//! +//! But the origin system can also express more abstract and complicated privilege levels. For +//! example: +//! +//! * If the majority of token holders agreed upon this. This is more or less what the +//! [`pallet_democracy`] does under the hood ([reference](https://github.com/paritytech/polkadot-sdk/blob/edd95b3749754d2ed0c5738588e872c87be91624/substrate/frame/democracy/src/lib.rs#L1603-L1633)). +//! * If a specific ratio of an instance of [`pallet_collective`]/DAO agrees upon this. +//! * If another consensus system, for example a bridged network or a parachain, agrees upon this. +//! * If the majority of validator/authority set agrees upon this[^1]. +//! * If caller holds a particular NFT. +//! +//! and many more. +//! +//! ## Context +//! +//! First, let's look at where the `origin` type is encountered in a typical pallet. The `origin: +//! OriginFor` has to be the first argument of any given callable extrinsic in FRAME: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", call_simple)] +//! +//! Typically, the code of an extrinsic starts with an origin check, such as +//! [`frame_system::ensure_signed`]. +//! +//! Note that [`OriginFor`](frame_system::pallet_prelude::OriginFor) is merely a shorthand for +//! [`frame_system::Config::RuntimeOrigin`]. Given the name prefix `Runtime`, we can learn that +//! `RuntimeOrigin` is similar to `RuntimeCall` and others, a runtime composite enum that is +//! amalgamated at the runtime level. Read [`crate::reference_docs::frame_runtime_types`] to +//! familiarize yourself with these types. +//! +//! To understand this better, we will next create a pallet with a custom origin, which will add a +//! new variant to `RuntimeOrigin`. +//! +//! ## Adding Custom Pallet Origin to the Runtime +//! +//! For example, given a pallet that defines the following custom origin: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin)] +//! +//! And a runtime with the following pallets: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", runtime_exp)] +//! +//! The type [`crate::reference_docs::frame_origin::runtime_for_origin::RuntimeOrigin`] is expanded. +//! This `RuntimeOrigin` contains a variant for the [`frame_system::RawOrigin`] and the custom +//! origin of the pallet. +//! +//! > Notice how the [`frame_system::ensure_signed`] is nothing more than a `match` statement. If +//! > you want to know where the actual origin of an extrinsic is set (and the signature +//! > verification happens, if any), see +//! > [`sp_runtime::generic::CheckedExtrinsic#trait-implementations`], specifically +//! > [`sp_runtime::traits::Applyable`]'s implementation. +//! +//! ## Asserting on a Custom Internal Origin +//! +//! In order to assert on a custom origin that is defined within your pallet, we need a way to first +//! convert the `::RuntimeOrigin` into the local `enum Origin` of the +//! current pallet. This is a common process that is explained in +//! [`crate::reference_docs::frame_runtime_types# +//! adding-further-constraints-to-runtime-composite-enums`]. +//! +//! We use the same process here to express that `RuntimeOrigin` has a number of additional bounds, +//! as follows. +//! +//! 1. Defining a custom `RuntimeOrigin` with further bounds in the pallet. 
+#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_bound)] +//! +//! 2. Using it in the pallet. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_usage)] +//! +//! ## Asserting on a Custom External Origin +//! +//! Very often, a pallet wants to have a parameterized origin that is **NOT** defined within the +//! pallet. In other words, a pallet wants to delegate an origin check to something that is +//! specified later at the runtime level. Like many other parameterizations in FRAME, this implies +//! adding a new associated type to `trait Config`. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_def)] +//! +//! Then, within the pallet, we can simply use this "unknown" origin check type: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_usage)] +//! +//! Finally, at the runtime, any implementation of [`frame::traits::EnsureOrigin`] can be passed. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_provide)] +//! +//! Indeed, some of these implementations of [`frame::traits::EnsureOrigin`] are similar to the ones +//! that we know about: [`frame::runtime::prelude::EnsureSigned`], +//! [`frame::runtime::prelude::EnsureSignedBy`], [`frame::runtime::prelude::EnsureRoot`], +//! [`frame::runtime::prelude::EnsureNone`], etc. But, there are also many more that are not known +//! to us, and are defined in other pallets. +//! +//! For example, [`pallet_collective`] defines [`pallet_collective::EnsureMember`] and +//! [`pallet_collective::EnsureProportionMoreThan`] and many more, which is exactly what we alluded +//! to earlier in this document. +//! +//! Make sure to check the full list of [implementors of +//! `EnsureOrigin`](frame::traits::EnsureOrigin#implementors) for more inspiration. +//! +//! ## Obtaining Abstract Origins +//! +//! So far we have learned that FRAME pallets can assert on custom and abstract origin types, +//! whether they are defined within the pallet or not. But how can we obtain these abstract origins? +//! +//! > All extrinsics that come from the outer world can generally only be obtained as either +//! > `signed` or `none` origin. +//! +//! Generally, these abstract origins are only obtained within the runtime, when a call is +//! dispatched within the runtime. +//! +//! ## Further References +//! +//! - [Gavin Wood's speech about FRAME features at Protocol Berg 2023.](https://youtu.be/j7b8Upipmeg?si=83_XUgYuJxMwWX4g&t=195) +//! - [A related StackExchange question.](https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin) +//! +//! [^1]: Inherents are essentially unsigned extrinsics that need an [`frame_system::ensure_none`] +//! origin check, and through the virtue of being an inherent, are agreed upon by all validators. 
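The reference doc above repeatedly points at [`frame::traits::EnsureOrigin`] implementors that live outside of the pallet being written. As a complement to the `only_validators` example embedded above (and defined in the code below), here is a minimal, hedged sketch of what a hand-written implementor for the custom `Origin::ValidatorSet` variant could look like. It is not part of this patch; the name `EnsureValidatorSet` and the exact bounds are illustrative assumptions only.

```rust
// Illustrative sketch only (not part of the patch): an `EnsureOrigin`
// implementor that succeeds only for the custom `Origin::ValidatorSet`
// variant defined in `pallet_with_custom_origin` below.
use core::marker::PhantomData;
use frame::traits::EnsureOrigin;

pub struct EnsureValidatorSet<O>(PhantomData<O>);

impl<O> EnsureOrigin<O> for EnsureValidatorSet<O>
where
	// `O` is the runtime-level origin; `construct_runtime` provides these
	// conversions once the pallet's `#[pallet::origin]` is registered.
	O: Into<Result<pallet_with_custom_origin::Origin, O>>
		+ From<pallet_with_custom_origin::Origin>,
{
	type Success = ();

	fn try_origin(o: O) -> Result<Self::Success, O> {
		// Try to peel the runtime origin down to this pallet's local origin.
		o.into().and_then(|local| match local {
			pallet_with_custom_origin::Origin::ValidatorSet => Ok(()),
			// Hand anything else back unchanged so other checks may run.
			other => Err(O::from(other)),
		})
	}

	#[cfg(feature = "runtime-benchmarks")]
	fn try_successful_origin() -> Result<O, ()> {
		Ok(O::from(pallet_with_custom_origin::Origin::ValidatorSet))
	}
}
```

Expressed this way, the "is this the validator set?" policy becomes a reusable type that a runtime could plug into an `EnsureOrigin`-parameterized pallet (such as `pallet_with_external_origin` below), instead of hard-coding the `matches!` check inside the call body.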
+ +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet_for_origin { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(call_simple)] + #[pallet::call] + impl Pallet { + pub fn do_something(_origin: OriginFor) -> DispatchResult { + // ^^^^^^^^^^^^^^^^^^^^^ + todo!(); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_custom_origin { + use super::*; + + #[docify::export(custom_origin_bound)] + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeOrigin: From<::RuntimeOrigin> + + Into::RuntimeOrigin>>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(custom_origin)] + /// A dummy custom origin. + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + /// If all holders of a particular NFT have agreed upon this. + AllNftHolders, + /// If all validators have agreed upon this. + ValidatorSet, + } + + #[docify::export(custom_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn only_validators(origin: OriginFor) -> DispatchResult { + // first, we convert from `::RuntimeOrigin` to `::RuntimeOrigin` + let local_runtime_origin = <::RuntimeOrigin as From< + ::RuntimeOrigin, + >>::from(origin); + // then we convert to `origin`, if possible + let local_origin = + local_runtime_origin.into().map_err(|_| "invalid origin type provided")?; + ensure!(matches!(local_origin, Origin::ValidatorSet), "Not authorized"); + todo!(); + } + } +} + +pub mod runtime_for_origin { + use super::pallet_with_custom_origin; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithCustomOrigin: pallet_with_custom_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_with_custom_origin::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_external_origin { + use super::*; + #[docify::export(external_origin_def)] + #[pallet::config] + pub trait Config: frame_system::Config { + type ExternalOrigin: EnsureOrigin; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(external_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn externally_checked_ext(origin: OriginFor) -> DispatchResult { + let _ = T::ExternalOrigin::ensure_origin(origin)?; + todo!(); + } + } +} + +pub mod runtime_for_external_origin { + use super::*; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithExternalOrigin: pallet_with_external_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(external_origin_provide)] + impl pallet_with_external_origin::Config for Runtime { + type ExternalOrigin = EnsureSigned<::AccountId>; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs new file mode 100644 index 00000000000..b5838b79e51 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -0,0 +1,306 @@ +//! # FRAME Runtime Types +//! +//! 
This reference document briefly explores the idea around types generated at the runtime level by +//! the FRAME macros. +//! +//! > As of now, many of these important types are generated within the internals of +//! > [`construct_runtime`], and there is no easy way for you to visually know they exist. +//! > [#polkadot-sdk#1378](https://github.com/paritytech/polkadot-sdk/pull/1378) is meant to +//! > significantly improve this. Exploring the rust-docs of a runtime, such as [`runtime`], which is +//! > defined in this module, is as of now the best way to learn about these types. +//! +//! ## Composite Enums +//! +//! Many types within a FRAME runtime follow this structure: +//! +//! * Each individual pallet defines a type, for example `Foo`. +//! * At the runtime level, these types are amalgamated into a single type, for example +//! `RuntimeFoo`. +//! +//! As the names suggest, all composite enums in a FRAME runtime start their name with `Runtime`. +//! For example, `RuntimeCall` is a representation of the highest-level `Call`-able type in the +//! runtime. +//! +//! Composite enums are generally convertible to their individual parts as such: +#![doc = simple_mermaid::mermaid!("../../../mermaid/outer_runtime_types.mmd")] +//! +//! That is, one can always convert from the inner type into the outer type, but not vice versa. This +//! is usually expressed by implementing `From`, `TryFrom` and similar conversion traits. +//! +//! ### Example +//! +//! We provide the following two pallets: [`pallet_foo`] and [`pallet_bar`]. Each defines a +//! dispatchable, and `Foo` also defines a custom origin. Lastly, `Bar` defines an additional +//! `GenesisConfig`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_foo)] +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_bar)] +//! +//! Let's explore how each of these affects the [`RuntimeCall`], [`RuntimeOrigin`] and +//! [`RuntimeGenesisConfig`] generated in [`runtime`], respectively. +//! +//! As observed, [`RuntimeCall`] has 3 variants, one for each pallet and one for `frame_system`. If +//! you explore further, you will soon realize that each variant is merely a pointer to the `Call` +//! type in each pallet, for example [`pallet_foo::Call`]. +//! +//! [`RuntimeOrigin`]'s [`OriginCaller`] has two variants, one for system, and one for `pallet_foo`, +//! which utilizes [`frame::pallet_macros::origin`]. +//! +//! Finally, [`RuntimeGenesisConfig`] is composed of `frame_system` and a variant for `pallet_bar`'s +//! [`pallet_bar::GenesisConfig`]. +//! +//! You can find other composite enums by scanning [`runtime`] for other types whose names start +//! with `Runtime`. Some of the more noteworthy ones are: +//! +//! - [`RuntimeEvent`] +//! - [`RuntimeError`] +//! - [`RuntimeHoldReason`] +//! +//! ### Adding Further Constraints to Runtime Composite Enums +//! +//! This section explores a common scenario where a pallet has access to one of these runtime +//! composite enums, but it wishes to further specify it by adding more trait bounds to it. +//! +//! Let's take the example of `RuntimeCall`. This is an associated type in +//! [`frame_system::Config::RuntimeCall`], and all pallets have access to this type, because they +//! have access to [`frame_system::Config`]. Finally, this type is meant to be set to the outer call of +//! the entire runtime. +//! +//! But, let's not forget that this is information that *we know*, and the Rust compiler does not. +//! All that the Rust compiler knows about this type is *ONLY* what the trait bounds of +//! [`frame_system::Config::RuntimeCall`] are specifying: +#![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", system_runtime_call)] +//! +//! So, when a given pallet accesses `<T as frame_system::Config>::RuntimeCall`, the type is +//! extremely opaque from the perspective of the Rust compiler. +//! +//! How can a pallet access the `RuntimeCall` type with further constraints? For example, each +//! pallet has its own `enum Call`, and knows that its local `Call` is a part of `RuntimeCall`, +//! therefore there should be an `impl From<Call<T>> for RuntimeCall`. +//! +//! The only way to express this using Rust's associated types is for the pallet to **define its own +//! associated type `RuntimeCall`, and further specify what it thinks `RuntimeCall` should be**. +//! +//! In this case, we will want to assert the existence of [`frame::traits::IsSubType`], which is +//! very similar to [`TryFrom`]. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call)] +//! +//! And indeed, at the runtime level, this associated type would be the same `RuntimeCall` that is +//! passed to `frame_system`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_with_specific_runtime_call_impl)] +//! +//! > In other words, the degree of specificity that [`frame_system::Config::RuntimeCall`] has is +//! > not enough for the pallet to work with. Therefore, the pallet has to define its own associated +//! > type representing `RuntimeCall`. +//! +//! Another way to look at this is: +//! +//! `pallet_with_specific_runtime_call::Config::RuntimeCall` and `frame_system::Config::RuntimeCall` +//! are two different representations of the same concrete type that is only known when the runtime +//! is being constructed. +//! +//! Now, within this pallet, this new `RuntimeCall` can be used, and it can use its new trait +//! bounds, such as being [`frame::traits::IsSubType`]: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)] +//! +//! ### Asserting Equality of Multiple Runtime Composite Enums +//! +//! Recall that in the above example, `<T as Config>::RuntimeCall` and `<T as frame_system::Config>::RuntimeCall` are expected to be equal types, but at compile time we +//! have to represent them with two different associated types with different bounds. Would it not +//! be cool if we had a test to make sure they actually resolve to the same concrete type once the +//! runtime is constructed? The following snippet does exactly that: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", assert_equality)] +//! +//! We leave it to the reader to further explore what [`frame::traits::Hooks::integrity_test`] is, +//! and what [`core::any::TypeId`] is. Another way to assert this is using +//! [`frame::traits::IsType`]. +//! +//! ## Type Aliases +//! +//! A number of type aliases generated by `construct_runtime` are also noteworthy: +//! +//! * [`runtime::PalletFoo`] is an alias to [`pallet_foo::Pallet`]. Same for `PalletBar` and +//! `System`. +//! * [`runtime::AllPalletsWithSystem`] is an alias for a tuple of all of the above. This type is +//! important to FRAME internals such as `executive`, as it implements traits such as +//! [`frame::traits::Hooks`]. +//! +//! ## Further Details +//! +//! * [`crate::reference_docs::frame_origin`] explores further details about the usage of +//! `RuntimeOrigin`. +//! 
* [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an +//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! * See the documentation of [`construct_runtime`]. +//! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). +//! +//! +//! [`construct_runtime`]: frame::runtime::prelude::construct_runtime +//! [`runtime::PalletFoo`]: crate::reference_docs::frame_runtime_types::runtime::PalletFoo +//! [`runtime::AllPalletsWithSystem`]: crate::reference_docs::frame_runtime_types::runtime::AllPalletsWithSystem +//! [`runtime`]: crate::reference_docs::frame_runtime_types::runtime +//! [`pallet_foo`]: crate::reference_docs::frame_runtime_types::pallet_foo +//! [`pallet_foo::Call`]: crate::reference_docs::frame_runtime_types::pallet_foo::Call +//! [`pallet_foo::Pallet`]: crate::reference_docs::frame_runtime_types::pallet_foo::Pallet +//! [`pallet_bar`]: crate::reference_docs::frame_runtime_types::pallet_bar +//! [`pallet_bar::GenesisConfig`]: crate::reference_docs::frame_runtime_types::pallet_bar::GenesisConfig +//! [`RuntimeEvent`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeEvent +//! [`RuntimeGenesisConfig`]: +//! crate::reference_docs::frame_runtime_types::runtime::RuntimeGenesisConfig +//! [`RuntimeOrigin`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeOrigin +//! [`OriginCaller`]: crate::reference_docs::frame_runtime_types::runtime::OriginCaller +//! [`RuntimeError`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeError +//! [`RuntimeCall`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeCall +//! [`RuntimeHoldReason`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeHoldReason + +use frame::prelude::*; + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_foo { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + A, + B, + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + + pub fn other(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_bar { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + pub initial_account: Option, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::call] + impl Pallet { + pub fn bar(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } +} + +pub mod runtime { + use super::{pallet_bar, pallet_foo}; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo, + PalletBar: pallet_bar, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_foo::Config for Runtime {} + impl pallet_bar::Config for Runtime {} +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_specific_runtime_call { + use super::*; + use 
frame::traits::IsSubType; + + #[docify::export(custom_runtime_call)] + /// A pallet that wants to further narrow down what `RuntimeCall` is. + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeCall: IsSubType>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + // note that this pallet needs some `call` to have a `enum Call`. + #[pallet::call] + impl Pallet { + pub fn foo(_origin: OriginFor) -> DispatchResult { + todo!(); + } + } + + #[docify::export(custom_runtime_call_usages)] + impl Pallet { + fn _do_something_useful_with_runtime_call(call: ::RuntimeCall) { + // check if the runtime call given is of this pallet's variant. + let _maybe_my_call: Option<&Call> = call.is_sub_type(); + todo!(); + } + } + + #[docify::export(assert_equality)] + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + use core::any::TypeId; + assert_eq!( + TypeId::of::<::RuntimeCall>(), + TypeId::of::<::RuntimeCall>() + ); + } + } +} + +pub mod runtime_with_specific_runtime_call { + use super::pallet_with_specific_runtime_call; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithSpecificRuntimeCall: pallet_with_specific_runtime_call, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(pallet_with_specific_runtime_call_impl)] + impl pallet_with_specific_runtime_call::Config for Runtime { + // an implementation of `IsSubType` is provided by `construct_runtime`. + type RuntimeCall = RuntimeCall; + } +} diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index c16122ee428..760bb442c16 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -43,16 +43,16 @@ pub mod extrinsic_encoding; // TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; -/// Learn about *"Origin"* A topic in FRAME that enables complex account abstractions to be built. -// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43 +/// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; /// Learn about how to write safe and defensive code in your FRAME runtime. // TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44 pub mod safe_defensive_programming; -/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall". -pub mod frame_composite_enums; +/// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and +/// "RuntimeCall". +pub mod frame_runtime_types; /// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to /// control usage and sybil attacks. diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index a1715ba4900..a2e5d726fdd 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -163,6 +163,12 @@ pub mod runtime { ConstU32, ConstU64, ConstU8, }; + /// Primary types used to parameterize `EnsureOrigin` and `EnsureRootWithArg`. + pub use frame_system::{ + EnsureNever, EnsureNone, EnsureRoot, EnsureRootWithSuccess, EnsureSigned, + EnsureSignedBy, + }; + /// Types to define your runtime version. 
pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index b421d2aaffa..83049919d01 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -104,7 +104,7 @@ pub fn expand_outer_origin( #[doc = #doc_string] #[derive(Clone)] pub struct RuntimeOrigin { - caller: OriginCaller, + pub caller: OriginCaller, filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 20b8d74310f..441b21e26ff 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1480,22 +1480,11 @@ pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. /// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. +/// --- /// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::origin`. #[proc_macro_attribute] pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index cd12da6de54..26fc1fe42c1 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2274,9 +2274,8 @@ pub mod pallet_macros { pub use frame_support_procedural::{ composite_enum, config, disable_frame_system_supertrait_check, error, event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, - import_section, inherent, no_default, no_default_bounds, origin, pallet_section, - storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, - whitelist_storage, + import_section, inherent, no_default, no_default_bounds, pallet_section, storage_prefix, + storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In @@ -2718,7 +2717,7 @@ pub mod pallet_macros { /// } /// ``` pub use frame_support_procedural::storage; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2726,21 +2725,21 @@ pub mod pallet_macros { /// should have the same signature as the function it is attached to, except that it should /// return a `bool` instead. 
pub use frame_support_procedural::task_condition; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// /// It takes an integer literal as input, which is then used to define the index. This /// index should be unique for each function in the `impl` block. pub use frame_support_procedural::task_index; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the /// available work items for a task. /// /// It takes an iterator as input that yields a tuple with the same types as the function /// arguments. pub use frame_support_procedural::task_list; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the weight of a given work /// item. /// @@ -2773,6 +2772,61 @@ /// Now, this can be executed as follows: #[doc = docify::embed!("src/tests/tasks.rs", tasks_work)] pub use frame_support_procedural::tasks_experimental; + + /// Allows a pallet to declare a type as an origin. + /// + /// If defined as such, this type will be amalgamated at the runtime level into + /// `RuntimeOrigin`, very similar to [`call`], [`error`] and [`event`]. See + /// [`composite_enum`] for similar cases. + /// + /// Origin is a complex FRAME topic and is further explained in `polkadot_sdk_docs`. + /// + /// ## Syntax Variants + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// /// On the spot declaration. + /// #[pallet::origin] + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum Origin { + /// Foo, + /// Bar, + /// } + /// } + /// ``` + /// + /// Or, more commonly used: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum RawOrigin { + /// Foo, + /// Bar, + /// } + /// + /// #[pallet::origin] + /// pub type Origin = RawOrigin; + /// } + /// ``` + /// + /// ## Warning + /// + /// Modifying any pallet's origin type will cause the runtime level origin type to also + /// change in encoding. If stored anywhere on-chain, this will require a data migration. + pub use frame_support_procedural::origin; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 1405c7303f8..aa6841c008a 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -452,6 +452,7 @@ pub mod pallet { + Clone + OriginTrait; + #[docify::export(system_runtime_call)] /// The aggregated `RuntimeCall` type. 
#[pallet::no_default_bounds] type RuntimeCall: Parameter -- GitLab From 0cc9b9003c019400a8e26c2e1d5679ab654a63dc Mon Sep 17 00:00:00 2001 From: Petr Mensik Date: Tue, 27 Feb 2024 23:50:07 +0100 Subject: [PATCH 10/86] Add Polkadotters bootnoders per IBP application (#3423) Hey everyone, this PR will replace existing Polkadotters bootnodes for Polkadot, Kusama and Westend and add Paseo bootnode to the relay chain suite. At the same time, it will add new bootnodes for all the system parachains, including People on Westend. This PR is a part of our membership in the IBP, meaning that all the bootnodes are hosted on our hardware housed in the data center in Christchurch, New Zealand. All the bootnodes were tested with an empty chain spec file with the following command yielding 1 peer. The test commands used are as follows: ``` ./polkadot --base-path /tmp/node --reserved-only --chain paseo --reserved-nodes "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ./polkadot --base-path /tmp/node --reserved-only --chain westend --reserved-nodes "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5" ./polkadot --base-path /tmp/node --reserved-only --chain kusama --reserved-nodes "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG" ./polkadot --base-path /tmp/node --no-hardware-benchmarks --reserved-only --chain polkadot --reserved-nodes "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-kusama --reserved-nodes "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-polkadot --reserved-nodes "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-westend --reserved-nodes "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain bridge-hub-kusama --reserved-nodes "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain bridge-hub-kusama --reserved-nodes "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain bridge-hub-westend --reserved-nodes "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain collectives-polkadot --reserved-nodes "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain collectives-westend --reserved-nodes "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain people-westend --reserved-nodes 
"/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ``` Best regards, Petr, Polkadotters --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/collectives-polkadot.json | 4 +++- cumulus/parachains/chain-specs/collectives-westend.json | 4 +++- cumulus/parachains/chain-specs/people-westend.json | 6 ++++-- polkadot/node/service/chain-specs/kusama.json | 4 ++-- polkadot/node/service/chain-specs/paseo.json | 4 +++- polkadot/node/service/chain-specs/polkadot.json | 4 ++-- polkadot/node/service/chain-specs/westend.json | 4 ++-- 13 files changed, 37 insertions(+), 17 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index fba74b17f96..654275eade8 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", - "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 685a00ddc71..454060d2a87 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -25,7 +25,9 @@ "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", - "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 6f42b5f7d8b..670935c9d24 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -23,7 +23,9 @@ 
"/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", - "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 0ef81806cc5..90b70b05016 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", - "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 130bdf31ef2..a9444b89e1e 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -21,7 +21,9 @@ "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", - "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 018ab0ee6fd..447207a5810 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -19,7 +19,9 @@ "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", 
"/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", - "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index e9f690234e4..259669cf37a 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -25,7 +25,9 @@ "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index ffe73b5a05a..e459c631f8b 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -25,7 +25,9 @@ "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", - "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" + "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index fa29853c70b..29fa0c9cde7 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -10,7 +10,9 @@ "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", - 
"/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23" + "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +81,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 979550c7570..fd60cb8b6c1 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -16,8 +16,8 @@ "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/kusama-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-kusama.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index c8f2b58533c..8db75ff137b 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -12,7 +12,9 @@ "/dns/boot-node.helikon.io/tcp/10020/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/boot-node.helikon.io/tcp/10022/wss/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", - "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu" + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30538/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 71dbb900403..c235cdd09d8 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -17,8 +17,8 @@ "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", - 
"/dns/polkadot-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - "/dns/polkadot-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-polkadot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 697675871fc..775f3e72ac7 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -14,8 +14,8 @@ "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/westend-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-westend.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", -- GitLab From 95da6583602691fd0c8403e9eda26db1e10dafc4 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Wed, 28 Feb 2024 13:13:09 +1100 Subject: [PATCH 11/86] Introduce storage attr macro `#[disable_try_decode_storage]` and set it on `System::Events` and `ParachainSystem::HostConfiguration` (#3454) Closes https://github.com/paritytech/polkadot-sdk/issues/2560 Allows marking storage items with `#[disable_try_decode_storage]`, and uses it with `System::Events`. Question: what's the recommended way to write a test for this? I couldn't find a test for similar existing macro `#[whitelist_storage]`. --- cumulus/pallets/parachain-system/src/lib.rs | 1 + prdoc/pr_3454.prdoc | 19 +++++++++++++++ substrate/frame/support/procedural/src/lib.rs | 19 +++++++++++++++ .../procedural/src/pallet/expand/storage.rs | 5 +++- .../procedural/src/pallet/parse/storage.rs | 24 +++++++++++++++++-- substrate/frame/support/src/lib.rs | 17 +++++++++++-- .../storage_invalid_attribute.stderr | 2 +- substrate/frame/system/src/lib.rs | 1 + 8 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_3454.prdoc diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index d86a67e58f4..3439227bc5b 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -840,6 +840,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. 
#[pallet::storage] + #[pallet::disable_try_decode_storage] #[pallet::getter(fn host_configuration)] pub(super) type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; diff --git a/prdoc/pr_3454.prdoc b/prdoc/pr_3454.prdoc new file mode 100644 index 00000000000..7f564258f23 --- /dev/null +++ b/prdoc/pr_3454.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Introduce storage attr macro #[disable_try_decode_storage] and set it on System::Events and ParachainSystem::HostConfiguration" + +doc: + - audience: Runtime Dev + description: | + Allows marking storage items with \#[disable_try_decode_storage], which disables that storage item from being decoded + during try_decode_entire_state calls. + + Applied the attribute to System::Events to close https://github.com/paritytech/polkadot-sdk/issues/2560. + Applied the attribute to ParachainSystem::HostConfiguration to resolve periodic issues with it. + +crates: + - name: frame-support-procedural + - name: frame-system + - name: cumulus-pallet-parachain-system + diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 441b21e26ff..78729ee3dc3 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1371,6 +1371,25 @@ pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the +/// storage as whitelisted from decoding during try-runtime checks. This should only be +/// attached to transient storage which cannot be migrated during runtime upgrades. +/// +/// ### Example +/// ```ignore +/// #[pallet::storage] +/// #[pallet::disable_try_decode_storage] +/// pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; +/// ``` +/// +/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as +/// `#[pallet::disable_try_decode_storage]` and can only be placed inside a `pallet` module in order +/// for it to work properly. +#[proc_macro_attribute] +pub fn disable_try_decode_storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait /// to ease the use of storage types. This attribute is meant to be used alongside /// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 96c2c8e3120..937b068cfab 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -834,7 +834,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .storages .iter() .filter_map(|storage| { - if storage.cfg_attrs.is_empty() { + // A little hacky; don't generate for cfg gated storages to not get compile errors + // when building "frame-feature-testing" gated storages in the "frame-support-test" + // crate. 
+ if storage.try_decode && storage.cfg_attrs.is_empty() { let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); Some(quote::quote_spanned!(storage.attr_span => #ident<#gen> )) diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index d1c7ba2e5e3..9d96a18b569 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(whitelist_storage); + syn::custom_keyword!(disable_try_decode_storage); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); @@ -39,11 +40,13 @@ mod keyword { /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` /// * `#[pallet::whitelist_storage] +/// * `#[pallet::disable_try_decode_storage]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), WhitelistStorage(proc_macro2::Span), + DisableTryDecodeStorage(proc_macro2::Span), } impl PalletStorageAttr { @@ -53,6 +56,7 @@ impl PalletStorageAttr { Self::StorageName(_, span) | Self::Unbounded(span) | Self::WhitelistStorage(span) => *span, + Self::DisableTryDecodeStorage(span) => *span, } } } @@ -93,6 +97,9 @@ impl syn::parse::Parse for PalletStorageAttr { } else if lookahead.peek(keyword::whitelist_storage) { content.parse::()?; Ok(Self::WhitelistStorage(attr_span)) + } else if lookahead.peek(keyword::disable_try_decode_storage) { + content.parse::()?; + Ok(Self::DisableTryDecodeStorage(attr_span)) } else { Err(lookahead.error()) } @@ -104,6 +111,7 @@ struct PalletStorageAttrInfo { rename_as: Option, unbounded: bool, whitelisted: bool, + try_decode: bool, } impl PalletStorageAttrInfo { @@ -112,6 +120,7 @@ impl PalletStorageAttrInfo { let mut rename_as = None; let mut unbounded = false; let mut whitelisted = false; + let mut disable_try_decode_storage = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), @@ -119,6 +128,8 @@ impl PalletStorageAttrInfo { rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, + PalletStorageAttr::DisableTryDecodeStorage(..) if !disable_try_decode_storage => + disable_try_decode_storage = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -127,7 +138,13 @@ impl PalletStorageAttrInfo { } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) + Ok(PalletStorageAttrInfo { + getter, + rename_as, + unbounded, + whitelisted, + try_decode: !disable_try_decode_storage, + }) } } @@ -186,6 +203,8 @@ pub struct StorageDef { pub unbounded: bool, /// Whether or not reads to this storage key will be ignored by benchmarking pub whitelisted: bool, + /// Whether or not to try to decode the storage key when running try-runtime checks. 
+ pub try_decode: bool, /// Whether or not a default hasher is allowed to replace `_` pub use_default_hasher: bool, } @@ -775,7 +794,7 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted, try_decode } = PalletStorageAttrInfo::from_attrs(attrs)?; // set all storages to be unbounded if dev_mode is enabled @@ -921,6 +940,7 @@ impl StorageDef { named_generics, unbounded, whitelisted, + try_decode, use_default_hasher, }) } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 26fc1fe42c1..3e97a384275 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -982,6 +982,7 @@ pub mod pallet_prelude { /// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) /// * [`pallet::unbounded`](#palletunbounded-optional) /// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) +/// * [`pallet::disable_try_decode_storage`](#palletdisable_try_decode_storage-optional) /// * [`cfg(..)`](#cfg-for-storage) (on storage items) /// * [`pallet::type_value`](#type-value-pallettype_value-optional) /// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) @@ -1498,6 +1499,15 @@ pub mod pallet_prelude { /// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) /// for more info. /// +/// ## `#[pallet::disable_try_decode_storage]` (optional) +/// +/// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the storage as +/// whitelisted state decoding during try-runtime logic. +/// +/// See +/// [`pallet::disable_try_decode_storage`](frame_support::pallet_macros::disable_try_decode_storage) +/// for more info. +/// /// ## `#[cfg(..)]` (for storage) /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. /// @@ -2272,8 +2282,8 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - composite_enum, config, disable_frame_system_supertrait_check, error, event, - extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + composite_enum, config, disable_frame_system_supertrait_check, disable_try_decode_storage, + error, event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, pallet_section, storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; @@ -2698,6 +2708,8 @@ pub mod pallet_macros { /// * [`macro@getter`]: Creates a custom getter function. /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// * [`macro@disable_try_decode_storage`]: Declares that try-runtime checks should not + /// attempt to decode the storage item. 
/// /// #### Example /// ``` @@ -2713,6 +2725,7 @@ pub mod pallet_macros { /// #[pallet::getter(fn foo)] /// #[pallet::storage_prefix = "OtherFoo"] /// #[pallet::unbounded] + /// #[pallet::disable_try_decode_storage] /// pub type Foo = StorageValue<_, u32, ValueQuery>; /// } /// ``` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index 7f125526edf..519fadaa604 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage` +error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage`, `disable_try_decode_storage` --> tests/pallet_ui/storage_invalid_attribute.rs:33:12 | 33 | #[pallet::generate_store(pub trait Store)] diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index aa6841c008a..c527a483d88 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -894,6 +894,7 @@ pub mod pallet { /// just in case someone still reads them from within the runtime. #[pallet::storage] #[pallet::whitelist_storage] + #[pallet::disable_try_decode_storage] #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; -- GitLab From 7ec0b8741b4ead5bc85411b30e53b459a03b1938 Mon Sep 17 00:00:00 2001 From: maksimryndin Date: Wed, 28 Feb 2024 05:05:54 +0100 Subject: [PATCH 12/86] Collator overseer builder unification (#3335) resolve https://github.com/paritytech/polkadot-sdk/issues/3116 a follow-up on https://github.com/paritytech/polkadot-sdk/pull/3061#pullrequestreview-1847530265: - [x] reuse collator overseer builder for polkadot-node and collator - [x] run zombienet test (0001-parachains-smoke-test.toml) - [x] make wasm build errors more user-friendly for an easier problem detection when using different toolchains in Rust --------- Co-authored-by: ordian Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> --- Cargo.lock | 1 + .../relay-chain-minimal-node/Cargo.toml | 1 + .../src/collator_overseer.rs | 144 +----------------- .../relay-chain-minimal-node/src/lib.rs | 30 ++-- .../relay-chain-minimal-node/src/network.rs | 8 +- .../src/variants/back_garbage_candidate.rs | 15 +- polkadot/node/malus/src/variants/common.rs | 4 - .../variants/dispute_finalized_candidates.rs | 17 +-- .../src/variants/dispute_valid_candidates.rs | 15 +- .../src/variants/suggest_garbage_candidate.rs | 17 +-- .../malus/src/variants/support_disabled.rs | 15 +- polkadot/node/service/src/lib.rs | 13 +- polkadot/node/service/src/overseer.rs | 76 +++------ .../subsystem-types/src/runtime_client.rs | 64 +++++++- .../utils/wasm-builder/src/prerequisites.rs | 20 ++- 15 files changed, 162 insertions(+), 278 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c52feafe7a7..27ba43780ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4166,6 +4166,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-primitives", + "polkadot-service", "sc-authority-discovery", "sc-client-api", "sc-network", diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 0b1e001e007..98240c92ada 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ 
b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -24,6 +24,7 @@ polkadot-node-collation-generation = { path = "../../../polkadot/node/collation- polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } +polkadot-service = { path = "../../../polkadot/node/service" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 5f5bf338ef9..f01ef8b05ec 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,159 +15,29 @@ // along with Polkadot. If not, see . use futures::{select, StreamExt}; -use parking_lot::Mutex; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; -use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; -use polkadot_network_bridge::{ - Metrics as NetworkBridgeMetrics, NetworkBridgeRx as NetworkBridgeRxSubsystem, - NetworkBridgeTx as NetworkBridgeTxSubsystem, -}; -use polkadot_node_collation_generation::CollationGenerationSubsystem; -use polkadot_node_core_chain_api::ChainApiSubsystem; -use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; -use polkadot_node_core_runtime_api::RuntimeApiSubsystem; -use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames}, - request_response::{ - v1::{self, AvailableDataFetchingRequest}, - v2, IncomingRequestReceiver, ReqProtocolNames, - }, -}; -use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; use polkadot_overseer::{ - BlockInfo, DummySubsystem, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, - UnpinHandle, + BlockInfo, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, UnpinHandle, }; -use polkadot_primitives::CollatorPair; +use polkadot_service::overseer::{collator_overseer_builder, OverseerGenArgs}; -use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; -use cumulus_primitives_core::relay_chain::{Block, Hash as PHash}; use cumulus_relay_chain_interface::RelayChainError; use crate::BlockChainRpcClient; -/// Arguments passed for overseer construction. -pub(crate) struct CollatorOverseerGenArgs<'a> { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. - pub runtime_client: Arc, - /// Underlying network service implementation. - pub network_service: Arc>, - /// Syncing oracle. - pub sync_oracle: Box, - /// Underlying authority discovery service. - pub authority_discovery_service: AuthorityDiscoveryService, - /// Receiver for collation request protocol v1. - pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol v2. - pub collation_req_receiver_v2: IncomingRequestReceiver, - /// Receiver for availability request protocol - pub available_data_req_receiver: IncomingRequestReceiver, - /// Prometheus registry, commonly used for production systems, less so for test. 
- pub registry: Option<&'a Registry>, - /// Task spawner to be used throughout the overseer and the APIs it provides. - pub spawner: sc_service::SpawnTaskHandle, - /// Determines the behavior of the collator. - pub collator_pair: CollatorPair, - /// Request response protocols - pub req_protocol_names: ReqProtocolNames, - /// Peerset protocols name mapping - pub peer_set_protocol_names: PeerSetProtocolNames, - /// Notification services for validation/collation protocols. - pub notification_services: HashMap>, -} - fn build_overseer( connector: OverseerConnector, - CollatorOverseerGenArgs { - runtime_client, - network_service, - sync_oracle, - authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, - available_data_req_receiver, - registry, - spawner, - collator_pair, - req_protocol_names, - peer_set_protocol_names, - notification_services, - }: CollatorOverseerGenArgs<'_>, + args: OverseerGenArgs, ) -> Result< (Overseer, Arc>, OverseerHandle), RelayChainError, > { - let spawner = SpawnGlue(spawner); - let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let notification_sinks = Arc::new(Mutex::new(HashMap::new())); - - let builder = Overseer::builder() - .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::for_collator( - available_data_req_receiver, - Metrics::register(registry)?, - )) - .availability_store(DummySubsystem) - .bitfield_distribution(DummySubsystem) - .bitfield_signing(DummySubsystem) - .candidate_backing(DummySubsystem) - .candidate_validation(DummySubsystem) - .pvf_checker(DummySubsystem) - .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) - .collator_protocol({ - let side = ProtocolSide::Collator { - peer_id: network_service.local_peer_id(), - collator_pair, - request_receiver_v1: collation_req_receiver_v1, - request_receiver_v2: collation_req_receiver_v2, - metrics: Metrics::register(registry)?, - }; - CollatorProtocolSubsystem::new(side) - }) - .network_bridge_rx(NetworkBridgeRxSubsystem::new( - network_service.clone(), - authority_discovery_service.clone(), - sync_oracle, - network_bridge_metrics.clone(), - peer_set_protocol_names.clone(), - notification_services, - notification_sinks.clone(), - )) - .network_bridge_tx(NetworkBridgeTxSubsystem::new( - network_service, - authority_discovery_service, - network_bridge_metrics, - req_protocol_names, - peer_set_protocol_names, - notification_sinks, - )) - .provisioner(DummySubsystem) - .runtime_api(RuntimeApiSubsystem::new( - runtime_client.clone(), - Metrics::register(registry)?, - spawner.clone(), - )) - .statement_distribution(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) - .approval_distribution(DummySubsystem) - .approval_voting(DummySubsystem) - .gossip_support(DummySubsystem) - .dispute_coordinator(DummySubsystem) - .dispute_distribution(DummySubsystem) - .chain_selection(DummySubsystem) - .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) - .active_leaves(Default::default()) - .supports_parachains(runtime_client) - .metrics(Metrics::register(registry)?) 
- .spawner(spawner); + let builder = + collator_overseer_builder(args).map_err(|e| RelayChainError::Application(e.into()))?; builder .build_with_connector(connector) @@ -175,7 +45,7 @@ fn build_overseer( } pub(crate) fn spawn_overseer( - overseer_args: CollatorOverseerGenArgs, + overseer_args: OverseerGenArgs, task_manager: &TaskManager, relay_chain_rpc_client: Arc, ) -> Result { diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index d121d2d3356..4bccca59fe3 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use collator_overseer::{CollatorOverseerGenArgs, NewMinimalNode}; +use collator_overseer::NewMinimalNode; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterface, Url}; @@ -29,6 +29,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::prometheus::Registry; use polkadot_primitives::CollatorPair; +use polkadot_service::{overseer::OverseerGenArgs, IsParachainNode}; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService}; @@ -172,10 +173,10 @@ async fn new_minimal_relay_chain( } let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); - let peer_set_protocol_names = + let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + let notification_services = peer_sets_info(is_authority, &peerset_protocol_names) .into_iter() .map(|(config, (peerset, service))| { net_config.add_notification_protocol(config); @@ -184,14 +185,14 @@ async fn new_minimal_relay_chain( .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = + let (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client .chain_get_header(None) .await? 
.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?; - let (network, network_starter, sync_oracle) = build_collator_network( + let (network, network_starter, sync_service) = build_collator_network( &config, net_config, task_manager.spawn_handle(), @@ -208,19 +209,20 @@ async fn new_minimal_relay_chain( prometheus_registry.cloned(), ); - let overseer_args = CollatorOverseerGenArgs { + let overseer_args = OverseerGenArgs { runtime_client: relay_chain_rpc_client.clone(), network_service: network, - sync_oracle, + sync_service, authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, + collation_req_v1_receiver, + collation_req_v2_receiver, available_data_req_receiver, registry: prometheus_registry, spawner: task_manager.spawn_handle(), - collator_pair, + is_parachain_node: IsParachainNode::Collator(collator_pair), + overseer_message_channel_capacity_override: None, req_protocol_names: request_protocol_names, - peer_set_protocol_names, + peerset_protocol_names, notification_services, }; @@ -240,10 +242,10 @@ fn build_request_response_protocol_receivers( IncomingRequestReceiver, IncomingRequestReceiver, ) { - let (collation_req_receiver_v1, cfg) = + let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_v2, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -251,5 +253,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) + (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 95785063c1a..7286fab7907 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -40,7 +40,11 @@ pub(crate) fn build_collator_network( genesis_hash: Hash, best_header: Header, ) -> Result< - (Arc>, NetworkStarter, Box), + ( + Arc>, + NetworkStarter, + Arc, + ), Error, > { let protocol_id = config.protocol_id(); @@ -112,7 +116,7 @@ pub(crate) fn build_collator_network( let network_starter = NetworkStarter::new(network_start_tx); - Ok((network_service, network_starter, Box::new(SyncOracle {}))) + Ok((network_service, network_starter, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs index 82475d29142..b939a2151e2 100644 --- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs @@ -20,14 +20,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, 
validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::{ @@ -63,13 +62,9 @@ impl OverseerGen for BackGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 011fcc80e37..eb6988f8181 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -23,10 +23,6 @@ use crate::{ use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{InvalidCandidate, ValidationResult}; -use polkadot_node_subsystem::{ - messages::{CandidateValidationMessage, ValidationFailed}, - overseer, -}; use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs index b3555ba2f5b..7a95bdaead2 100644 --- a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -34,14 +34,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; -use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue}; -use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal}; +use polkadot_node_subsystem::SpawnGlue; +use polkadot_node_subsystem_types::{ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient}; use polkadot_node_subsystem_util::request_candidate_events; use polkadot_primitives::CandidateEvent; use sp_core::traits::SpawnNamed; @@ -237,13 +236,9 @@ impl OverseerGen for DisputeFinalizedCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs index b9812cbb501..a50fdce16e4 100644 --- a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs +++ 
b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs @@ -24,14 +24,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; // Filter wrapping related types. @@ -80,13 +79,9 @@ impl OverseerGen for DisputeValidCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index 22b44ddd1dc..739ed40db36 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -25,14 +25,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{CandidateDescriptor, CandidateReceipt}; use polkadot_node_subsystem_util::request_validators; @@ -52,7 +51,7 @@ use crate::{ // Import extra types relevant to the particular // subsystem. 
-use polkadot_node_subsystem::{messages::CandidateBackingMessage, SpawnGlue}; +use polkadot_node_subsystem::SpawnGlue; use std::sync::Arc; @@ -296,13 +295,9 @@ impl OverseerGen for SuggestGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/support_disabled.rs b/polkadot/node/malus/src/variants/support_disabled.rs index e25df56fd64..169c442db25 100644 --- a/polkadot/node/malus/src/variants/support_disabled.rs +++ b/polkadot/node/malus/src/variants/support_disabled.rs @@ -19,14 +19,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::interceptor::*; @@ -50,13 +49,9 @@ impl OverseerGen for SupportDisabled { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { validator_overseer_builder( diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 6dcdec07ca8..83a0afc077e 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -92,6 +92,7 @@ pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; +use polkadot_node_subsystem_types::DefaultSubsystemClient; pub use polkadot_primitives::{Block, BlockId, BlockNumber, CollatorPair, Hash, Id as ParaId}; pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; @@ -1081,12 +1082,17 @@ pub fn new_full( None }; + let runtime_client = Arc::new(DefaultSubsystemClient::new( + overseer_client.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )); + let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen - .generate::( + .generate::>( overseer_connector, OverseerGenArgs { - runtime_client: overseer_client.clone(), + runtime_client, network_service: network.clone(), sync_service: sync_service.clone(), authority_discovery_service, @@ -1099,9 +1105,6 @@ pub fn new_full( 
overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - ), notification_services, }, ext_overseer_args, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index b2b0786bfc2..a8167370464 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use super::{Block, Error, Hash, IsParachainNode, Registry}; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_overseer::{DummySubsystem, InitializedOverseerBuilder, SubsystemError}; -use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; use polkadot_availability_distribution::IncomingRequestReceivers; @@ -40,14 +39,10 @@ use polkadot_overseer::{ }; use parking_lot::Mutex; -use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; use sc_network::{NetworkStateInfo, NotificationService}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_consensus_babe::BabeApi; use std::{collections::HashMap, sync::Arc}; pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; @@ -80,8 +75,6 @@ pub use polkadot_statement_distribution::StatementDistributionSubsystem; /// Arguments passed for overseer construction. pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, { /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. @@ -89,7 +82,7 @@ where /// Underlying network service implementation. pub network_service: Arc>, /// Underlying syncing service implementation. - pub sync_service: Arc>, + pub sync_service: Arc, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, /// Collations request receiver for network protocol v1. @@ -111,8 +104,6 @@ where pub req_protocol_names: ReqProtocolNames, /// `PeerSet` protocol names to protocols mapping. pub peerset_protocol_names: PeerSetProtocolNames, - /// The offchain transaction pool factory. - pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, /// Notification services for validation/collation protocols. 
pub notification_services: HashMap>, } @@ -160,7 +151,6 @@ pub fn validator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ExtendedOverseerGenArgs { @@ -180,7 +170,7 @@ pub fn validator_overseer_builder( ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, CandidateValidationSubsystem, PvfCheckerSubsystem, CandidateBackingSubsystem, @@ -190,7 +180,7 @@ pub fn validator_overseer_builder( BitfieldSigningSubsystem, BitfieldDistributionSubsystem, ProvisionerSubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, AvailabilityStoreSubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -214,8 +204,7 @@ pub fn validator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { use polkadot_node_subsystem_util::metrics::Metrics; @@ -227,11 +216,6 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -298,7 +282,7 @@ where }) .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), Metrics::register(registry)?, spawner.clone(), )) @@ -339,7 +323,7 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) + .supports_parachains(runtime_client) .metrics(metrics) .spawner(spawner); @@ -367,13 +351,12 @@ pub fn collator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, DummySubsystem, DummySubsystem, DummySubsystem, @@ -383,7 +366,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, DummySubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -407,24 +390,17 @@ pub fn collator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, { use polkadot_node_subsystem_util::metrics::Metrics; - let metrics = ::register(registry)?; let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -475,7 +451,7 @@ where }) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), 
Metrics::register(registry)?, spawner.clone(), )) @@ -490,8 +466,8 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) - .metrics(metrics) + .supports_parachains(runtime_client) + .metrics(Metrics::register(registry)?) .spawner(spawner); let builder = if let Some(capacity) = overseer_message_channel_capacity_override { @@ -510,13 +486,9 @@ pub trait OverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin; // It would be nice to make `create_subsystems` part of this trait, @@ -533,13 +505,9 @@ impl OverseerGen for ValidatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let ext_args = ext_args.ok_or(Error::Overseer(SubsystemError::Context( @@ -561,13 +529,9 @@ impl OverseerGen for CollatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, _ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { collator_overseer_builder(args)? diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7f618307610..4039fc9127d 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -26,13 +26,13 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use sc_client_api::HeaderBackend; +use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; -use sp_blockchain::Info; +use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; -use sp_runtime::traits::{Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; /// Offers header utilities. 
@@ -595,3 +595,61 @@ where self.client.runtime_api().approval_voting_params(at) } } + +impl HeaderBackend for DefaultSubsystemClient +where + Client: HeaderBackend, + Block: sp_runtime::traits::Block, +{ + fn header( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result> { + self.client.header(hash) + } + + fn info(&self) -> Info { + self.client.info() + } + + fn status(&self, hash: Block::Hash) -> sc_client_api::blockchain::Result { + self.client.status(hash) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + self.client.number(hash) + } + + fn hash( + &self, + number: NumberFor, + ) -> sc_client_api::blockchain::Result> { + self.client.hash(number) + } +} + +impl AuxStore for DefaultSubsystemClient +where + Client: AuxStore, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + self.client.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.client.get_aux(key) + } +} diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index a601e3210dd..22caf895063 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -149,6 +149,14 @@ impl<'a> DummyCrate<'a> { sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()) } + fn get_toolchain(&self) -> Option { + let sysroot = self.get_sysroot()?; + Path::new(sysroot.trim()) + .file_name() + .and_then(|s| s.to_str()) + .map(|s| s.to_string()) + } + fn try_build(&self) -> Result<(), Option> { let Ok(result) = self.prepare_command("build").output() else { return Err(None) }; if !result.status.success() { @@ -164,14 +172,15 @@ fn check_wasm_toolchain_installed( let dummy_crate = DummyCrate::new(&cargo_command, RuntimeTarget::Wasm); if let Err(error) = dummy_crate.try_build() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); let basic_error_message = colorize_error_message( - "Rust WASM toolchain is not properly installed; please install it!", + &format!("Rust WASM target for toolchain {toolchain} is not properly installed; please install it!") ); return match error { None => Err(basic_error_message), Some(error) if error.contains("the `wasm32-unknown-unknown` target may not be installed") => { - Err(colorize_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ - You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`.")) + Err(colorize_error_message(&format!("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ + You can install it with `rustup target add wasm32-unknown-unknown --toolchain {toolchain}` if you're using `rustup`."))) }, // Apparently this can happen when we're running on a non Tier 1 platform. 
Some(ref error) if error.contains("linker `rust-lld` not found") => @@ -193,9 +202,10 @@ fn check_wasm_toolchain_installed( let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); if !src_path.exists() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); return Err(colorize_error_message( - "Cannot compile the WASM runtime: no standard library sources found!\n\ - You can install them with `rustup component add rust-src` if you're using `rustup`.", + &format!("Cannot compile the WASM runtime: no standard library sources found at {}!\n\ + You can install them with `rustup component add rust-src --toolchain {toolchain}` if you're using `rustup`.", src_path.display()), )) } } -- GitLab From 12ce4f7d049b70918cadb658de4bbe8eb6ffc670 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Wed, 28 Feb 2024 18:32:02 +1100 Subject: [PATCH 13/86] Runtime Upgrade ref docs and Single Block Migration example pallet (#1554) Closes https://github.com/paritytech/polkadot-sdk-docs/issues/55 - Changes 'current storage version' terminology to less ambiguous 'in-code storage version' (suggestion by @ggwpez) - Adds a new example pallet `pallet-example-single-block-migrations` - Adds a new reference doc to replace https://docs.substrate.io/maintain/runtime-upgrades/ (temporarily living in the pallet while we wait for developer hub PR to merge) - Adds documentation for the `storage_alias` macro - Improves `trait Hooks` docs - Improves `trait GetStorageVersion` docs - Update the suggested patterns for using `VersionedMigration`, so that version unchecked migrations are never exported - Prevents accidental usage of version unchecked migrations in runtimes https://github.com/paritytech/substrate/pull/14421#discussion_r1255467895 - Unversioned migration code is kept inside `mod version_unchecked`, versioned code is kept in `pub mod versioned` - It is necessary to use modules to limit visibility because the inner migration must be `pub`. See https://github.com/rust-lang/rust/issues/30905 and https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 for more. 
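To make the recommended layout concrete, here is a minimal, hypothetical sketch of the `mod version_unchecked` / `pub mod versioned` split described in the bullets above. The names are placeholders, not code from this PR; it assumes the file sits inside a pallet crate that defines `Config: frame_system::Config` and `Pallet<T>`, and the exact `frame_support::migrations::VersionedMigration` generic parameters may differ between frame-support releases:

```rust
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

// Private module: the raw, version-unchecked migration lives here and is never
// re-exported, so a runtime cannot reference it directly.
mod version_unchecked {
    use super::*;

    /// Must be `pub` so the versioned wrapper below can name it, but the
    /// enclosing private module keeps it out of the crate's public API.
    pub struct MigrateV0ToV1<T>(core::marker::PhantomData<T>);

    impl<T: crate::Config> OnRuntimeUpgrade for MigrateV0ToV1<T> {
        fn on_runtime_upgrade() -> Weight {
            // ... translate storage from the V0 layout to the V1 layout ...
            T::DbWeight::get().reads_writes(1, 1)
        }
    }
}

// Public module: only the version-checked wrapper is exported.
pub mod versioned {
    /// Runs the inner migration only when the pallet's on-chain storage version
    /// is 0, then bumps the on-chain version to 1.
    pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
        0,
        1,
        super::version_unchecked::MigrateV0ToV1<T>,
        crate::Pallet<T>,
        <T as frame_system::Config>::DbWeight,
    >;
}
```

Because only the `versioned` module is public, the version-unchecked inner type cannot end up in a runtime's `Migrations` tuple by accident.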
### todo - [x] move to reference docs to proper place within sdk-docs (now that https://github.com/paritytech/polkadot-sdk/pull/2102 is merged) - [x] prdoc --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Juan Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> Co-authored-by: gupnik --- Cargo.lock | 26 ++ Cargo.toml | 1 + cumulus/pallets/collator-selection/src/lib.rs | 2 +- .../collator-selection/src/migration.rs | 10 +- .../pallets/parachain-system/src/migration.rs | 2 +- cumulus/pallets/xcmp-queue/src/migration.rs | 2 +- .../pallets/collective-content/src/lib.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 14 +- .../assets/asset-hub-westend/src/lib.rs | 6 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 4 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 4 +- docs/sdk/Cargo.toml | 7 +- .../reference_docs/frame_runtime_migration.rs | 9 - .../frame_runtime_upgrades_and_migrations.rs | 138 +++++++++++ docs/sdk/src/reference_docs/mod.rs | 5 +- .../node/service/src/parachains_db/upgrade.rs | 2 +- .../common/src/assigned_slots/migration.rs | 12 +- .../runtime/common/src/assigned_slots/mod.rs | 2 +- .../runtime/common/src/crowdloan/migration.rs | 4 +- polkadot/runtime/common/src/crowdloan/mod.rs | 2 +- .../runtime/common/src/paras_registrar/mod.rs | 2 +- .../runtime/parachains/src/configuration.rs | 2 +- polkadot/runtime/parachains/src/disputes.rs | 2 +- .../parachains/src/session_info/migration.rs | 2 +- prdoc/pr_1554.prdoc | 14 ++ substrate/client/executor/src/executor.rs | 14 +- substrate/frame/alliance/src/migration.rs | 8 +- substrate/frame/assets/src/lib.rs | 2 +- substrate/frame/assets/src/migration.rs | 20 +- substrate/frame/balances/src/lib.rs | 2 +- substrate/frame/balances/src/migration.rs | 8 +- substrate/frame/collective/src/lib.rs | 2 +- substrate/frame/contracts/src/lib.rs | 2 +- substrate/frame/contracts/src/migration.rs | 20 +- .../frame/contracts/src/migration/v09.rs | 14 +- .../frame/contracts/src/migration/v10.rs | 24 +- .../frame/contracts/src/migration/v11.rs | 12 +- .../frame/contracts/src/migration/v12.rs | 40 ++-- .../frame/contracts/src/migration/v13.rs | 10 +- .../frame/contracts/src/migration/v14.rs | 20 +- .../frame/contracts/src/migration/v15.rs | 16 +- substrate/frame/democracy/src/lib.rs | 2 +- .../election-provider-multi-phase/src/lib.rs | 2 +- .../src/migrations.rs | 4 +- substrate/frame/elections-phragmen/src/lib.rs | 2 +- substrate/frame/examples/Cargo.toml | 3 + .../single-block-migrations/Cargo.toml | 61 +++++ .../single-block-migrations/src/lib.rs | 213 +++++++++++++++++ .../src/migrations/mod.rs | 20 ++ .../src/migrations/v1.rs | 222 ++++++++++++++++++ .../single-block-migrations/src/mock.rs | 69 ++++++ substrate/frame/examples/src/lib.rs | 3 + .../frame/fast-unstake/src/migrations.rs | 4 +- substrate/frame/grandpa/src/lib.rs | 2 +- substrate/frame/im-online/src/lib.rs | 2 +- substrate/frame/im-online/src/migration.rs | 2 +- substrate/frame/membership/src/lib.rs | 2 +- substrate/frame/multisig/src/lib.rs | 2 +- substrate/frame/multisig/src/migrations.rs | 2 +- substrate/frame/nfts/src/lib.rs | 2 +- substrate/frame/nfts/src/migration.rs | 16 +- substrate/frame/nomination-pools/src/lib.rs | 2 +- .../frame/nomination-pools/src/migration.rs | 28 +-- substrate/frame/preimage/src/lib.rs | 2 +- substrate/frame/referenda/src/lib.rs | 2 +- substrate/frame/referenda/src/migration.rs | 16 +- substrate/frame/scheduler/src/lib.rs | 2 +- substrate/frame/session/src/historical/mod.rs | 2 +- 
substrate/frame/session/src/lib.rs | 2 +- substrate/frame/society/src/migrations.rs | 54 ++--- substrate/frame/society/src/tests.rs | 44 ++-- substrate/frame/staking/src/migrations.rs | 12 +- substrate/frame/staking/src/pallet/mod.rs | 2 +- substrate/frame/support/procedural/src/lib.rs | 38 ++- .../procedural/src/pallet/expand/hooks.rs | 22 +- .../src/pallet/expand/pallet_struct.rs | 6 +- .../src/pallet/parse/pallet_struct.rs | 2 +- substrate/frame/support/src/lib.rs | 4 +- substrate/frame/support/src/migrations.rs | 55 +++-- substrate/frame/support/src/traits/hooks.rs | 70 +++--- .../frame/support/src/traits/metadata.rs | 71 ++++-- .../support/test/tests/construct_runtime.rs | 2 +- substrate/frame/support/test/tests/pallet.rs | 22 +- .../compare_unset_storage_version.rs | 2 +- .../compare_unset_storage_version.stderr | 2 +- substrate/frame/system/src/lib.rs | 2 +- substrate/frame/tips/src/lib.rs | 2 +- 87 files changed, 1223 insertions(+), 370 deletions(-) delete mode 100644 docs/sdk/src/reference_docs/frame_runtime_migration.rs create mode 100644 docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs create mode 100644 prdoc/pr_1554.prdoc create mode 100644 substrate/frame/examples/single-block-migrations/Cargo.toml create mode 100644 substrate/frame/examples/single-block-migrations/src/lib.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/migrations/mod.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/migrations/v1.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/mock.rs diff --git a/Cargo.lock b/Cargo.lock index 27ba43780ed..026786378d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9965,6 +9965,26 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +dependencies = [ + "docify", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-version", +] + [[package]] name = "pallet-example-split" version = "10.0.0" @@ -10006,6 +10026,7 @@ dependencies = [ "pallet-example-frame-crate", "pallet-example-kitchensink", "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-example-split", "pallet-example-tasks", ] @@ -13385,6 +13406,8 @@ dependencies = [ "cumulus-pallet-parachain-system", "docify", "frame", + "frame-executive", + "frame-support", "frame-system", "kitchensink-runtime", "pallet-aura", @@ -13393,9 +13416,11 @@ dependencies = [ "pallet-collective", "pallet-default-config-example", "pallet-democracy", + "pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", "pallet-proxy", + "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", "pallet-utility", @@ -13418,6 +13443,7 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", + "sp-version", "staging-chain-spec-builder", "staging-node-cli", "staging-parachain-info", diff --git a/Cargo.toml b/Cargo.toml index 1d27cfe9539..654367cc1e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -337,6 +337,7 @@ members = [ "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", + "substrate/frame/examples/single-block-migrations", "substrate/frame/examples/split", "substrate/frame/examples/tasks", "substrate/frame/executive", diff --git a/cumulus/pallets/collator-selection/src/lib.rs 
b/cumulus/pallets/collator-selection/src/lib.rs index 7449f4d68c7..fb4c4a445df 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -118,7 +118,7 @@ pub mod pallet { use sp_staking::SessionIndex; use sp_std::vec::Vec; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); type BalanceOf = diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 58b4cc5b06a..f384981dbae 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -31,8 +31,8 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 { let invulnerables_len = Invulnerables::::get().to_vec().len(); >::mutate(|invulnerables| { invulnerables.sort(); @@ -45,7 +45,7 @@ pub mod v1 { invulnerables_len, ); // Similar complexity to `set_invulnerables` (put storage value) - // Plus 1 read for length, 1 read for `onchain_version`, 1 write to put version + // Plus 1 read for length, 1 read for `on_chain_version`, 1 write to put version T::WeightInfo::set_invulnerables(invulnerables_len as u32) .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } else { @@ -83,8 +83,8 @@ pub mod v1 { "after migration, there should be the same number of invulnerables" ); - let onchain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(onchain_version >= 1, "must_upgrade"); + let on_chain_version = Pallet::::on_chain_storage_version(); + frame_support::ensure!(on_chain_version >= 1, "must_upgrade"); Ok(()) } diff --git a/cumulus/pallets/parachain-system/src/migration.rs b/cumulus/pallets/parachain-system/src/migration.rs index a92f85b9cd4..30106aceab5 100644 --- a/cumulus/pallets/parachain-system/src/migration.rs +++ b/cumulus/pallets/parachain-system/src/migration.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::Weight, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Migrates the pallet storage to the most recent version. diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6c86c3011d2..c7fa61a3e3f 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs index 7a685858acc..b1c960ad6a0 100644 --- a/cumulus/parachains/pallets/collective-content/src/lib.rs +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -59,7 +59,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::{traits::BadOrigin, Saturating}; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 81bb66a56a2..6791dc4064e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -986,37 +986,37 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Multisig::on_chain_storage_version() == StorageVersion::new(0) { - Multisig::current_storage_version().put::(); + Multisig::in_code_storage_version().put::(); writes.saturating_inc(); } if Assets::on_chain_storage_version() == StorageVersion::new(0) { - Assets::current_storage_version().put::(); + Assets::in_code_storage_version().put::(); writes.saturating_inc(); } if Uniques::on_chain_storage_version() == StorageVersion::new(0) { - Uniques::current_storage_version().put::(); + Uniques::in_code_storage_version().put::(); writes.saturating_inc(); } if Nfts::on_chain_storage_version() == StorageVersion::new(0) { - Nfts::current_storage_version().put::(); + Nfts::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 3f58c679eee..5352eeb1a1b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1037,17 +1037,17 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index f576776696c..db1dfbd45d6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -168,12 +168,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if 
Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index bd42a33370d..1c8c9aa2075 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -142,12 +142,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 576a81834f8..b1b60a2d77d 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -33,6 +33,10 @@ node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } subkey = { path = "../../substrate/bin/utils/subkey" } +frame-system = { path = "../../substrate/frame/system", default-features = false } +frame-support = { path = "../../substrate/frame/support", default-features = false } +frame-executive = { path = "../../substrate/frame/executive", default-features = false } +pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } # Substrate sc-network = { path = "../../substrate/client/network" } @@ -66,7 +70,7 @@ pallet-proxy = { path = "../../substrate/frame/proxy" } pallet-authorship = { path = "../../substrate/frame/authorship" } pallet-collective = { path = "../../substrate/frame/collective" } pallet-democracy = { path = "../../substrate/frame/democracy" } -frame-system = { path = "../../substrate/frame/system" } +pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -74,6 +78,7 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-version = { path = "../../substrate/primitives/version" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } diff --git a/docs/sdk/src/reference_docs/frame_runtime_migration.rs b/docs/sdk/src/reference_docs/frame_runtime_migration.rs deleted file mode 100644 index 0616ccbb6f5..00000000000 --- a/docs/sdk/src/reference_docs/frame_runtime_migration.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! # Runtime Runtime Upgrade and Testing -//! -//! -//! Notes: -//! -//! - Flow of things, when does `on_runtime_upgrade` get called. Link to to `Hooks` and its diagram -//! as source of truth. -//! - Data migration and when it is needed. -//! - Look into the pba-lecture. 
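The `InitStorageVersions` hunks above all reduce to the same shape: if a pallet has never recorded a storage version on chain, write the version declared in code (the newly renamed `in_code_storage_version`) and count one database write. A minimal sketch of that shape, where `Runtime` and `SomePallet` are placeholders rather than identifiers taken from this patch:

```rust
use frame_support::{
    traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion},
    weights::Weight,
};
use sp_runtime::traits::Saturating;

/// One-off hook that initialises any storage versions still missing on chain.
pub struct InitStorageVersions;

impl OnRuntimeUpgrade for InitStorageVersions {
    fn on_runtime_upgrade() -> Weight {
        let mut writes = 0;

        // The pallet never wrote its storage version on chain, so seed it with
        // the version declared in code.
        if SomePallet::on_chain_storage_version() == StorageVersion::new(0) {
            SomePallet::in_code_storage_version().put::<SomePallet>();
            writes.saturating_inc();
        }

        // One read for the version check, plus however many versions were written.
        <Runtime as frame_system::Config>::DbWeight::get().reads_writes(1, writes)
    }
}
```

Each additional pallet covered by such a hook adds one more read for its version check and at most one more write.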
diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs new file mode 100644 index 00000000000..7d870b43221 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -0,0 +1,138 @@ +//! # Runtime Upgrades +//! +//! At their core, blockchain logic consists of +//! +//! 1. on-chain state and +//! 2. a state transition function +//! +//! In Substrate-based blockchains, state transition functions are referred to as +//! [runtimes](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/blockchain_state_machines/index.html). +//! +//! Traditionally, before Substrate, upgrading state transition functions required node +//! operators to download new software and restart their nodes in a process called +//! [forking](https://en.wikipedia.org/wiki/Fork_(blockchain)). +//! +//! Substrate-based blockchains do not require forking, and instead upgrade runtimes +//! in a process called "Runtime Upgrades". +//! +//! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the +//! runtime logic without forking the code base enables your blockchain to seamlessly evolve +//! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators +//! and other participants in the network about what the canonical runtime is. +//! +//! This capability is possible due to the runtime of a blockchain existing in on-chain storage. +//! +//! ## Performing a Runtime Upgrade +//! +//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions +//! (usually via governance) changes the `:code` storage. Usually, this is performed via a call to +//! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled +//! using [`pallet_scheduler`]. +//! +//! Prior to building the new runtime, don't forget to update the +//! [`RuntimeVersion`](sp_version::RuntimeVersion). +//! +//! # Migrations +//! +//! It is often desirable to define logic to execute immediately after runtime upgrades (see +//! [this diagram](frame::traits::Hooks)). +//! +//! Self-contained pieces of logic that execute after a runtime upgrade are called "Migrations". +//! +//! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, +//! for example when the encoding of a storage item is changed. However, they can also execute +//! arbitrary logic such as: +//! +//! - Calling arbitrary pallet methods +//! - Mutating arbitrary on-chain state +//! - Cleaning up some old storage items that are no longer needed +//! +//! ## Single Block Migrations +//! +//! - Execute immediately and entirely at the beginning of the block following +//! a runtime upgrade. +//! - Are suitable for migrations which are guaranteed to not exceed the block weight. +//! - Are simply implementations of [`OnRuntimeUpgrade`]. +//! +//! To learn best practices for writing single block pallet storage migrations, see the +//! [Single Block Migration Example Pallet](pallet_example_single_block_migrations). +//! +//! ### Scheduling the Single Block Migrations to Run Next Runtime Upgrade +//! +//! Schedule migrations to run at the next runtime upgrade by passing them as a generic parameter to your +//! [`Executive`](frame_executive) pallet: +//! +//! ```ignore +//! /// Tuple of migrations (structs that implement `OnRuntimeUpgrade`) +//! type Migrations = ( +//!
pallet_example_storage_migration::migrations::v1::versioned::MigrateV0ToV1, +//! MyCustomMigration, +//! // ...more migrations here +//! ); +//! pub type Executive = frame_executive::Executive< +//! Runtime, +//! Block, +//! frame_system::ChainContext, +//! Runtime, +//! AllPalletsWithSystem, +//! Migrations, // <-- pass your migrations to Executive here +//! >; +//! ``` +//! +//! ### Ensuring Single Block Migration Safety +//! +//! "My migration unit tests pass, so it should be safe to deploy right?" +//! +//! No! Unit tests execute the migration in a very simple test environment, and cannot account +//! for the complexities of a real runtime or real on-chain state. +//! +//! Prior to deploying migrations, it is critical to perform additional checks to ensure that when +//! run in our real runtime they will not brick the chain due to: +//! - Panicking +//! - Touching too many storage keys and resulting in an excessively large PoV +//! - Taking too long to execute +//! +//! [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) has a sub-command +//! [`on-runtime-upgrade`](https://paritytech.github.io/try-runtime-cli/try_runtime_core/commands/enum.Action.html#variant.OnRuntimeUpgrade) +//! which is designed to help with exactly this. +//! +//! Developers MUST run this command before deploying migrations to ensure they will not +//! inadvertently result in a bricked chain. +//! +//! It is recommended to run it as part of your CI pipeline. See the +//! [polkadot-sdk check-runtime-migration job](https://github.com/paritytech/polkadot-sdk/blob/4a293bc5a25be637c06ce950a34490706597615b/.gitlab/pipeline/check.yml#L103-L124) +//! for an example of how to configure this. +//! +//! ### Note on the Manipulability of PoV Size and Execution Time +//! +//! While [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) can help ensure with +//! very high certainty that a migration will succeed given **existing** on-chain state, it cannot +//! prevent a malicious actor from manipulating state in a way that will cause the migration to take +//! longer or produce a PoV much larger than previously measured. +//! +//! Therefore, it is important to write migrations in such a way that the execution time or PoV size +//! they add to the block cannot be easily manipulated. For example, do not iterate over storage that can +//! quickly or cheaply be bloated. +//! +//! If writing your migration in such a way is not possible, a multi block migration should be used +//! instead. +//! +//! ### Other useful tools +//! +//! [`Chopsticks`](https://github.com/AcalaNetwork/chopsticks) is another tool in the Substrate +//! ecosystem which developers may find useful to use in addition to `try-runtime-cli` when testing +//! their single block migrations. +//! +//! ## Multi Block Migrations +//! +//! Safely and easily execute long-running migrations across multiple blocks. +//! +//! Suitable for migrations which could use arbitrary amounts of block weight. +//! +//! TODO: Link to multi block migration example/s once PR is merged (). +//! +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`StorageVersion`]: frame_support::traits::StorageVersion +//! [`set_code`]: frame_system::Call::set_code +//!
[`set_code_without_checks`]: frame_system::Call::set_code_without_checks diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 760bb442c16..b2c751420ce 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -92,9 +92,8 @@ pub mod cli; // TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 pub mod consensus_swapping; -/// Learn about all the advance ways to test your coordinate a rutnime upgrade and data migration. -// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 -pub mod frame_runtime_migration; +/// Learn about Runtime Upgrades and best practices for writing Migrations. +pub mod frame_runtime_upgrades_and_migrations; /// Learn about light nodes, how they function, and how Substrate-based chains come /// light-node-first out of the box. diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index d22eebb5c8d..2eceb391b15 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -93,7 +93,7 @@ pub(crate) fn try_upgrade_db( } /// Try upgrading parachain's database to the next version. -/// If successfull, it returns the current version. +/// If successful, it returns the current version. pub(crate) fn try_upgrade_db_to_next_version( db_path: &Path, db_kind: DatabaseKind, diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index ba3108c0aa3..def6bad692a 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -29,14 +29,14 @@ pub mod v1 { impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); Ok(Default::default()) } fn on_runtime_upgrade() -> frame_support::weights::Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version < 1 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version < 1 { const MAX_PERMANENT_SLOTS: u32 = 100; const MAX_TEMPORARY_SLOTS: u32 = 100; @@ -52,8 +52,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); assert_eq!(>::get(), 100); assert_eq!(>::get(), 100); Ok(()) diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 3419e3497f7..4f032f4dfa3 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -107,7 +107,7 @@ type LeasePeriodOf = <::Leaser as Leaser>>::Le pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/crowdloan/migration.rs b/polkadot/runtime/common/src/crowdloan/migration.rs index 5133c14ada9..3afd6b3fbc9 100644 --- a/polkadot/runtime/common/src/crowdloan/migration.rs +++ b/polkadot/runtime/common/src/crowdloan/migration.rs @@ -24,9 +24,9 @@ use frame_support::{ pub struct MigrateToTrackInactiveV2(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToTrackInactiveV2 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { let mut translated = 0u64; for item in Funds::::iter_values() { let b = diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 7d1b892dfa7..35d075f2ff6 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -180,7 +180,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 5450c4309e4..4541b88ec6f 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -106,7 +106,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 7cc5b31fc8f..b0e9d03df88 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -505,7 +505,7 @@ impl WeightInfo for TestWeightInfo { pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. /// /// v0-v1: /// v1-v2: diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index c2383dad305..da95b060c14 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -379,7 +379,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/session_info/migration.rs b/polkadot/runtime/parachains/src/session_info/migration.rs index 228c1e3bb25..ea6f81834b5 100644 --- a/polkadot/runtime/parachains/src/session_info/migration.rs +++ b/polkadot/runtime/parachains/src/session_info/migration.rs @@ -18,5 +18,5 @@ use frame_support::traits::StorageVersion; -/// The current storage version. +/// The in-code storage version. 
pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/prdoc/pr_1554.prdoc b/prdoc/pr_1554.prdoc new file mode 100644 index 00000000000..bfce7c5edaf --- /dev/null +++ b/prdoc/pr_1554.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Runtime Upgrade ref docs and Single Block Migration example pallet + +doc: + - audience: Runtime Dev + description: | + `frame_support::traits::GetStorageVersion::current_storage_version` has been renamed `frame_support::traits::GetStorageVersion::in_code_storage_version`. + A simple find-replace is sufficient to handle this change. + +crates: + - name: "frame-support" + diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 499bb704b16..d56a3b389ef 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -518,7 +518,7 @@ where runtime_code, ext, heap_alloc_strategy, - |_, mut instance, _onchain_version, mut ext| { + |_, mut instance, _on_chain_version, mut ext| { with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) }, ); @@ -682,18 +682,18 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code, ext, heap_alloc_strategy, - |_, mut instance, onchain_version, mut ext| { - let onchain_version = - onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; + |_, mut instance, on_chain_version, mut ext| { + let on_chain_version = + on_chain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; let can_call_with = - onchain_version.can_call_with(&self.native_version.runtime_version); + on_chain_version.can_call_with(&self.native_version.runtime_version); if use_native && can_call_with { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution succeeded", ); @@ -705,7 +705,7 @@ impl CodeExecutor for NativeElseWasmExecut tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution failed", ); } diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index e3a44a7887e..432f09a16f4 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -19,19 +19,19 @@ use crate::{Config, Pallet, Weight, LOG_TARGET}; use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; use log; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Wrapper for all migrations of this pallet. 
pub fn migrate, I: 'static>() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight: Weight = Weight::zero(); - if onchain_version < 1 { + if on_chain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); } - if onchain_version < 2 { + if on_chain_version < 2 { weight = weight.saturating_add(v1_to_v2::migrate::()); } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index cafe7bb1a3b..60316f8cdcf 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index ff0ffbff0d3..dd7c12293e8 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -67,9 +67,9 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 && current_version == 1 { + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; Asset::::translate::< OldAssetDetails>, @@ -78,12 +78,12 @@ pub mod v1 { translated.saturating_inc(); Some(old_value.migrate_to_v1()) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} pools, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + 1, translated + 1) } else { @@ -116,13 +116,13 @@ pub mod v1 { "the asset count before and after the migration should be the same" ); - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(current_version == 1, "must_upgrade"); + frame_support::ensure!(in_code_version == 1, "must_upgrade"); ensure!( - current_version == onchain_version, - "after migration, the current_version and onchain_version should be the same" + in_code_version == on_chain_version, + "after migration, the in_code_version and on_chain_version should be the same" ); Asset::::iter().try_for_each(|(_id, asset)| -> Result<(), TryRuntimeError> { diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 7dd087eabd6..e87aa6f9311 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -320,7 +320,7 @@ pub mod pallet { type MaxFreezes: Get; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: frame_support::traits::StorageVersion = frame_support::traits::StorageVersion::new(1); diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs index ba6819ec6e8..38d9c07ff7e 100644 --- a/substrate/frame/balances/src/migration.rs +++ b/substrate/frame/balances/src/migration.rs @@ -22,9 +22,9 @@ use frame_support::{ }; fn migrate_v0_to_v1, I: 'static>(accounts: &[T::AccountId]) -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + if on_chain_version == 0 { let total = accounts .iter() .map(|a| Pallet::::total_balance(a)) @@ -76,9 +76,9 @@ impl, A: Get>, I: 'static> OnRuntimeUpgrade pub struct ResetInactive(PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for ResetInactive { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { // Remove the old `StorageVersion` type. frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( Pallet::::name().as_bytes(), diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index c084784e0a9..dd481b53681 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -176,7 +176,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index d8939ee88ba..32d789a458f 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. 
pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 6d61cb6b1e1..f30ae1ebfad 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,10 +263,10 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); let on_chain_version = >::on_chain_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", @@ -289,7 +289,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { return Ok(Default::default()) } log::debug!( target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), on_chain_version, current_version + "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", + >::name(), on_chain_version, in_code_version ); ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, current_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" ); Ok(Default::default()) @@ -421,7 +421,7 @@ impl Migration { }, StepResult::Completed { steps_done } => { in_progress_version.put::>(); - if >::current_storage_version() != in_progress_version { + if >::in_code_storage_version() != in_progress_version { log::info!( target: LOG_TARGET, "{name}: Next migration is {:?},", diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 98fcccc2c0b..f19bff9d674 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound, Identity}; use sp_runtime::TryRuntimeError; use sp_std::prelude::*; -mod old { +mod v8 { use super::*; #[derive(Encode, Decode)] @@ -50,14 +50,14 @@ mod old { #[cfg(feature = "runtime-benchmarks")] pub fn store_old_dummy_code(len: usize) { use sp_runtime::traits::Hash; - let module = old::PrefabWasmModule { + let module = v8::PrefabWasmModule { instruction_weights_version: 0, initial: 0, maximum: 0, code: vec![42u8; len], }; let hash = T::Hashing::hash(&module.code); - old::CodeStorage::::insert(hash, module); + v8::CodeStorage::::insert(hash, module); } #[derive(Encode, Decode)] @@ -89,9 +89,9 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::CodeStorage::::iter_from(old::CodeStorage::::hashed_key_for(last_key)) + v8::CodeStorage::::iter_from(v8::CodeStorage::::hashed_key_for(last_key)) } else { - old::CodeStorage::::iter() + v8::CodeStorage::::iter() }; if let Some((key, old)) = iter.next() { @@ -115,7 
+115,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::CodeStorage::::iter().take(100).collect(); + let sample: Vec<_> = v8::CodeStorage::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len()); Ok(sample.encode()) @@ -123,7 +123,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = , old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) + let sample = , v8::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) .expect("pre_upgrade_step provides a valid state; qed"); log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len()); diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index d64673aac7d..1bee86e6a77 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -47,7 +47,7 @@ use sp_runtime::{ }; use sp_std::prelude::*; -mod old { +mod v9 { use super::*; pub type BalanceOf = ( ) where OldCurrency: ReservableCurrency<::AccountId> + 'static, { - let info = old::ContractInfo { + let info = v9::ContractInfo { trie_id: info.trie_id, code_hash: info.code_hash, storage_bytes: Default::default(), @@ -94,7 +94,7 @@ pub fn store_old_contract_info( storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v9::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)] @@ -120,9 +120,9 @@ where pub code_hash: CodeHash, storage_bytes: u32, storage_items: u32, - pub storage_byte_deposit: old::BalanceOf, - storage_item_deposit: old::BalanceOf, - storage_base_deposit: old::BalanceOf, + pub storage_byte_deposit: v9::BalanceOf, + storage_item_deposit: v9::BalanceOf, + storage_base_deposit: v9::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -152,7 +152,7 @@ fn deposit_address( impl MigrationStep for Migration where OldCurrency: ReservableCurrency<::AccountId> - + Inspect<::AccountId, Balance = old::BalanceOf>, + + Inspect<::AccountId, Balance = v9::BalanceOf>, { const VERSION: u16 = 10; @@ -162,11 +162,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from( - old::ContractInfoOf::::hashed_key_for(last_account), + v9::ContractInfoOf::::iter_from( + v9::ContractInfoOf::::hashed_key_for(last_account), ) } else { - old::ContractInfoOf::::iter() + v9::ContractInfoOf::::iter() }; if let Some((account, contract)) = iter.next() { @@ -276,7 +276,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(10).collect(); + let sample: Vec<_> = v9::ContractInfoOf::::iter().take(10).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); Ok(sample.encode()) @@ -284,7 +284,7 @@ where #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = )> as Decode>::decode( + let sample = )> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/contracts/src/migration/v11.rs 
b/substrate/frame/contracts/src/migration/v11.rs index a5b11f6e089..9bfbb25edfb 100644 --- a/substrate/frame/contracts/src/migration/v11.rs +++ b/substrate/frame/contracts/src/migration/v11.rs @@ -29,7 +29,7 @@ use sp_runtime::TryRuntimeError; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_std::{marker::PhantomData, prelude::*}; -mod old { +mod v10 { use super::*; #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -51,11 +51,11 @@ pub struct DeletionQueueManager { #[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))] pub fn fill_old_queue(len: usize) { - let queue: Vec = - core::iter::repeat_with(|| old::DeletedContract { trie_id: Default::default() }) + let queue: Vec = + core::iter::repeat_with(|| v10::DeletedContract { trie_id: Default::default() }) .take(len) .collect(); - old::DeletionQueue::::set(Some(queue)); + v10::DeletionQueue::::set(Some(queue)); } #[storage_alias] @@ -80,7 +80,7 @@ impl MigrationStep for Migration { } fn step(&mut self) -> (IsFinished, Weight) { - let Some(old_queue) = old::DeletionQueue::::take() else { + let Some(old_queue) = v10::DeletionQueue::::take() else { return (IsFinished::Yes, Weight::zero()) }; let len = old_queue.len(); @@ -106,7 +106,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let old_queue = old::DeletionQueue::::take().unwrap_or_default(); + let old_queue = v10::DeletionQueue::::take().unwrap_or_default(); if old_queue.is_empty() { let len = 10u32; diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 7dee3150310..d9128286df3 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -34,7 +34,7 @@ use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; use sp_std::prelude::*; -mod old { +mod v11 { use super::*; pub type BalanceOf = , #[codec(compact)] - deposit: old::BalanceOf, + deposit: v11::BalanceOf, #[codec(compact)] refcount: u64, determinism: Determinism, @@ -112,17 +112,17 @@ where let hash = T::Hashing::hash(&code); PristineCode::::insert(hash, code.clone()); - let module = old::PrefabWasmModule { + let module = v11::PrefabWasmModule { instruction_weights_version: Default::default(), initial: Default::default(), maximum: Default::default(), code, determinism: Determinism::Enforced, }; - old::CodeStorage::::insert(hash, module); + v11::CodeStorage::::insert(hash, module); - let info = old::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; - old::OwnerInfoOf::::insert(hash, info); + let info = v11::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; + v11::OwnerInfoOf::::insert(hash, info); } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -148,16 +148,16 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::OwnerInfoOf::::iter_from( - old::OwnerInfoOf::::hashed_key_for(last_key), + v11::OwnerInfoOf::::iter_from( + v11::OwnerInfoOf::::hashed_key_for(last_key), ) } else { - old::OwnerInfoOf::::iter() + v11::OwnerInfoOf::::iter() }; if let Some((hash, old_info)) = iter.next() { log::debug!(target: LOG_TARGET, "Migrating OwnerInfo for code_hash {:?}", hash); - let module = old::CodeStorage::::take(hash) + let module = v11::CodeStorage::::take(hash) .expect(format!("No PrefabWasmModule 
found for code_hash: {:?}", hash).as_str()); let code_len = module.code.len(); @@ -184,7 +184,7 @@ where let bytes_before = module .encoded_size() .saturating_add(code_len) - .saturating_add(old::OwnerInfo::::max_encoded_len()) + .saturating_add(v11::OwnerInfo::::max_encoded_len()) as u32; let items_before = 3u32; let deposit_expected_before = price_per_byte @@ -241,10 +241,10 @@ where fn pre_upgrade_step() -> Result, TryRuntimeError> { let len = 100; log::debug!(target: LOG_TARGET, "Taking sample of {} OwnerInfo(s)", len); - let sample: Vec<_> = old::OwnerInfoOf::::iter() + let sample: Vec<_> = v11::OwnerInfoOf::::iter() .take(len) .map(|(k, v)| { - let module = old::CodeStorage::::get(k) + let module = v11::CodeStorage::::get(k) .expect("No PrefabWasmModule found for code_hash: {:?}"); let info: CodeInfo = CodeInfo { determinism: module.determinism, @@ -258,9 +258,9 @@ where .collect(); let storage: u32 = - old::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); - let mut deposit: old::BalanceOf = Default::default(); - old::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); + v11::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); + let mut deposit: v11::BalanceOf = Default::default(); + v11::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); Ok((sample, deposit, storage).encode()) } @@ -269,7 +269,7 @@ where fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let state = <( Vec<(CodeHash, CodeInfo)>, - old::BalanceOf, + v11::BalanceOf, u32, ) as Decode>::decode(&mut &state[..]) .unwrap(); @@ -283,7 +283,7 @@ where ensure!(info.refcount == old.refcount, "invalid refcount"); } - if let Some((k, _)) = old::CodeStorage::::iter().next() { + if let Some((k, _)) = v11::CodeStorage::::iter().next() { log::warn!( target: LOG_TARGET, "CodeStorage is still NOT empty, found code_hash: {:?}", @@ -292,7 +292,7 @@ where } else { log::debug!(target: LOG_TARGET, "CodeStorage is empty."); } - if let Some((k, _)) = old::OwnerInfoOf::::iter().next() { + if let Some((k, _)) = v11::OwnerInfoOf::::iter().next() { log::warn!( target: LOG_TARGET, "OwnerInfoOf is still NOT empty, found code_hash: {:?}", @@ -302,7 +302,7 @@ where log::debug!(target: LOG_TARGET, "OwnerInfoOf is empty."); } - let mut deposit: old::BalanceOf = Default::default(); + let mut deposit: v11::BalanceOf = Default::default(); let mut items = 0u32; let mut storage_info = 0u32; CodeInfoOf::::iter().for_each(|(_k, v)| { diff --git a/substrate/frame/contracts/src/migration/v13.rs b/substrate/frame/contracts/src/migration/v13.rs index dd2eb12eb62..498c44d53ab 100644 --- a/substrate/frame/contracts/src/migration/v13.rs +++ b/substrate/frame/contracts/src/migration/v13.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_runtime::BoundedBTreeMap; use sp_std::prelude::*; -mod old { +mod v12 { use super::*; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -59,7 +59,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v12::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -69,7 +69,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: 
crate::Co storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v12::ContractInfoOf::::insert(account, info); } #[storage_alias] @@ -104,11 +104,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v12::ContractInfoOf::::iter_from(v12::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v12::ContractInfoOf::::iter() }; if let Some((key, old)) = iter.next() { diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index 94534d05fdf..09da09e5bcf 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -44,7 +44,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::collections::btree_map::BTreeMap; -mod old { +mod v13 { use super::*; pub type BalanceOf = , #[codec(compact)] - pub deposit: old::BalanceOf, + pub deposit: v13::BalanceOf, #[codec(compact)] pub refcount: u64, pub determinism: Determinism, @@ -86,14 +86,14 @@ where let code = vec![42u8; len as usize]; let hash = T::Hashing::hash(&code); - let info = old::CodeInfo { + let info = v13::CodeInfo { owner: account, deposit: 10_000u32.into(), refcount: u64::MAX, determinism: Determinism::Enforced, code_len: len, }; - old::CodeInfoOf::::insert(hash, info); + v13::CodeInfoOf::::insert(hash, info); } #[cfg(feature = "try-runtime")] @@ -105,9 +105,9 @@ where OldCurrency: ReservableCurrency<::AccountId>, { /// Total reserved balance as code upload deposit for the owner. - reserved: old::BalanceOf, + reserved: v13::BalanceOf, /// Total balance of the owner. 
- total: old::BalanceOf, + total: v13::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -134,11 +134,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_hash) = self.last_code_hash.take() { - old::CodeInfoOf::::iter_from( - old::CodeInfoOf::::hashed_key_for(last_hash), + v13::CodeInfoOf::::iter_from( + v13::CodeInfoOf::::hashed_key_for(last_hash), ) } else { - old::CodeInfoOf::::iter() + v13::CodeInfoOf::::iter() }; if let Some((hash, code_info)) = iter.next() { @@ -194,7 +194,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let info: Vec<_> = old::CodeInfoOf::::iter().collect(); + let info: Vec<_> = v13::CodeInfoOf::::iter().collect(); let mut owner_balance_allocation = BTreeMap::, BalanceAllocation>::new(); diff --git a/substrate/frame/contracts/src/migration/v15.rs b/substrate/frame/contracts/src/migration/v15.rs index 180fe855ca6..c77198d6fea 100644 --- a/substrate/frame/contracts/src/migration/v15.rs +++ b/substrate/frame/contracts/src/migration/v15.rs @@ -46,7 +46,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; -mod old { +mod v14 { use super::*; #[derive( @@ -81,7 +81,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v14::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -92,7 +92,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_base_deposit: info.storage_base_deposit(), delegate_dependencies: info.delegate_dependencies().clone(), }; - old::ContractInfoOf::::insert(account, info); + v14::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -127,11 +127,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v14::ContractInfoOf::::iter_from(v14::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v14::ContractInfoOf::::iter() }; if let Some((account, old_contract)) = iter.next() { @@ -243,11 +243,11 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(100).collect(); + let sample: Vec<_> = v14::ContractInfoOf::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); - let state: Vec<(T::AccountId, old::ContractInfo, BalanceOf, BalanceOf)> = sample + let state: Vec<(T::AccountId, v14::ContractInfo, BalanceOf, BalanceOf)> = sample .iter() .map(|(account, contract)| { ( @@ -265,7 +265,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let sample = - , BalanceOf, BalanceOf)> as Decode>::decode( + , BalanceOf, BalanceOf)> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/democracy/src/lib.rs 
b/substrate/frame/democracy/src/lib.rs index 089556191cd..08e2a7599f5 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 4f43f89abed..6bf4dfe4f1e 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1343,7 +1343,7 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; - /// The current storage version. + /// The in-code storage version. /// /// v1: https://github.com/paritytech/substrate/pull/12237/ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/substrate/frame/election-provider-multi-phase/src/migrations.rs b/substrate/frame/election-provider-multi-phase/src/migrations.rs index 50b821e6db6..156f1c02e27 100644 --- a/substrate/frame/election-provider-multi-phase/src/migrations.rs +++ b/substrate/frame/election-provider-multi-phase/src/migrations.rs @@ -27,12 +27,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a078361a5f7..308f2cdc1aa 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -188,7 +188,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
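The `election-provider-multi-phase` hunk above is the first of many in this patch that switch migrations from `current_storage_version()` to the renamed `in_code_storage_version()`. For orientation, the guard these migrations build around that getter generally has the following shape; the pallet, the `Config` bound and the version numbers below are placeholders for illustration, not code from this patch:

```rust
use frame_support::{
    pallet_prelude::Weight,
    traits::{GetStorageVersion, OnRuntimeUpgrade},
};

/// Illustrative sketch only: `Pallet` and `Config` stand in for the pallet
/// actually being migrated.
pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
    fn on_runtime_upgrade() -> Weight {
        // The version declared on the pallet struct (what the code expects)...
        let in_code = Pallet::<T>::in_code_storage_version();
        // ...versus the version currently recorded in on-chain state.
        let on_chain = Pallet::<T>::on_chain_storage_version();

        if on_chain == 0 && in_code == 1 {
            // ... translate the storage items here ...

            // Record the new version so the migration never runs twice.
            in_code.put::<Pallet<T>>();
            T::DbWeight::get().reads_writes(1, 1)
        } else {
            // Versions do not line up: the migration is a no-op.
            T::DbWeight::get().reads(1)
        }
    }
}
```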
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index eb6355edd31..45c7440eb89 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -23,6 +23,7 @@ pallet-example-frame-crate = { path = "frame-crate", default-features = false } pallet-example-kitchensink = { path = "kitchensink", default-features = false } pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } pallet-example-split = { path = "split", default-features = false } +pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } pallet-example-tasks = { path = "tasks", default-features = false } [features] @@ -34,6 +35,7 @@ std = [ "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", + "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", ] @@ -43,6 +45,7 @@ try-runtime = [ "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", + "pallet-example-single-block-migrations/try-runtime", "pallet-example-split/try-runtime", "pallet-example-tasks/try-runtime", ] diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml new file mode 100644 index 00000000000..5059b2433c7 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet demonstrating best-practices for writing storage migrations." 
+publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +docify = { version = "0.2.3", default-features = false } +log = { version = "0.4.20", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { path = "../../support", default-features = false } +frame-executive = { path = "../../executive", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } +pallet-balances = { path = "../../balances", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "frame-try-runtime/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs new file mode 100644 index 00000000000..86a9e5d6e95 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Single Block Migration Example Pallet +//! +//! An example pallet demonstrating best-practices for writing single-block migrations in the +//! context of upgrading pallet storage. +//! +//! ## Forwarning +//! +//! Single block migrations **MUST** execute in a single block, therefore when executed on a +//! parachain are only appropriate when guaranteed to not exceed block weight limits. If a +//! parachain submits a block that exceeds the block weight limit it will **brick the chain**! +//! +//! If weight is a concern or you are not sure which type of migration to use, you should probably +//! use a multi-block migration. +//! +//! TODO: Link above to multi-block migration example. +//! +//! 
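Since the warning above turns on the fact that all of this logic executes within a single block, it may help to see where single-block migrations are wired in. A minimal sketch, assuming a runtime with the usual `Runtime`, `Block` and `AllPalletsWithSystem` aliases (none of which are defined in this patch):

```rust
// Hypothetical runtime wiring, not part of this patch: single-block migrations
// are handed to `Executive` as a tuple and executed by `on_runtime_upgrade`
// at the start of the first block built with the new runtime code.
pub type Migrations = (
    pallet_example_single_block_migrations::migrations::v1::versioned::MigrateV0ToV1<Runtime>,
);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;
```

This is exactly why the weight caveat matters: everything in `Migrations` must complete within the weight budget of that single block.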
## Pallet Overview +//! +//! This example pallet contains a single storage item [`Value`](pallet::Value), which may be set by +//! any signed origin by calling the [`set_value`](crate::Call::set_value) extrinsic. +//! +//! For the purposes of this exercise, we imagine that in [`StorageVersion`] V0 of this pallet +//! [`Value`](pallet::Value) is a `u32`, and this what is currently stored on-chain. +//! +//! ```ignore +//! // (Old) Storage Version V0 representation of `Value` +//! #[pallet::storage] +//! pub type Value = StorageValue<_, u32>; +//! ``` +//! +//! In [`StorageVersion`] V1 of the pallet a new struct [`CurrentAndPreviousValue`] is introduced: +#![doc = docify::embed!("src/lib.rs", CurrentAndPreviousValue)] +//! and [`Value`](pallet::Value) is updated to store this new struct instead of a `u32`: +#![doc = docify::embed!("src/lib.rs", Value)] +//! +//! In StorageVersion V1 of the pallet when [`set_value`](crate::Call::set_value) is called, the +//! new value is stored in the `current` field of [`CurrentAndPreviousValue`], and the previous +//! value (if it exists) is stored in the `previous` field. +#![doc = docify::embed!("src/lib.rs", pallet_calls)] +//! +//! ## Why a migration is necessary +//! +//! Without a migration, there will be a discrepancy between the on-chain storage for [`Value`] (in +//! V0 it is a `u32`) and the current storage for [`Value`] (in V1 it was changed to a +//! [`CurrentAndPreviousValue`] struct). +//! +//! The on-chain storage for [`Value`] would be a `u32` but the runtime would try to read it as a +//! [`CurrentAndPreviousValue`]. This would result in unacceptable undefined behavior. +//! +//! ## Adding a migration module +//! +//! Writing a pallets migrations in a seperate module is strongly recommended. +//! +//! Here's how the migration module is defined for this pallet: +//! +//! ```text +//! substrate/frame/examples/single-block-migrations/src/ +//! β”œβ”€β”€ lib.rs <-- pallet definition +//! β”œβ”€β”€ Cargo.toml <-- pallet manifest +//! └── migrations/ +//! β”œβ”€β”€ mod.rs <-- migrations module definition +//! └── v1.rs <-- migration logic for the V0 to V1 transition +//! ``` +//! +//! This structure allows keeping migration logic separate from the pallet logic and +//! easily adding new migrations in the future. +//! +//! ## Writing the Migration +//! +//! All code related to the migration can be found under +//! [`v1.rs`](migrations::v1). +//! +//! See the migration source code for detailed comments. +//! +//! To keep the migration logic organised, it is split across additional modules: +//! +//! ### `mod v0` +//! +//! Here we define a [`storage_alias`](frame_support::storage_alias) for the old v0 [`Value`] +//! format. +//! +//! This allows reading the old v0 value from storage during the migration. +//! +//! ### `mod version_unchecked` +//! +//! Here we define our raw migration logic, +//! `version_unchecked::MigrateV0ToV1` which implements the [`OnRuntimeUpgrade`] trait. +//! +//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! +//! Private modules cannot be referenced in docs, so please read the code directly. +//! +//! #### Standalone Struct or Pallet Hook? +//! +//! Note that the storage migration logic is attached to a standalone struct implementing +//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on +//! the pallet. 
The pallet hook is better suited for special types of logic that need to execute on +//! every runtime upgrade, but not so much for one-off storage migrations. +//! +//! ### `pub mod versioned` +//! +//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! [`VersionedMigration`] to define +//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! in runtimes. +//! +//! Using [`VersionedMigration`] ensures that +//! - The migration only runs once when the on-chain storage version is `0` +//! - The on-chain storage version is updated to `1` after the migration executes +//! - Reads and writes from checking and setting the on-chain storage version are accounted for in +//! the final [`Weight`](frame_support::weights::Weight) +//! +//! This is the only public module exported from `v1`. +//! +//! ### `mod test` +//! +//! Here basic unit tests are defined for the migration. +//! +//! When writing migration tests, don't forget to check: +//! - `on_runtime_upgrade` returns the expected weight +//! - `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +//! - Pallet storage is in the expected state after the migration +//! +//! [`VersionedMigration`]: frame_support::migrations::VersionedMigration +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrationV0ToV1 + +// We make sure this pallet uses `no_std` for compiling to Wasm. +#![cfg_attr(not(feature = "std"), no_std)] +// allow non-camel-case names for storage version V0 value +#![allow(non_camel_case_types)] + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +// Export migrations so they may be used in the runtime. +pub mod migrations; +#[doc(hidden)] +mod mock; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::StorageVersion; +use sp_runtime::RuntimeDebug; + +/// Example struct holding the most recently set [`u32`] and the +/// second most recently set [`u32`] (if one existed). +#[docify::export] +#[derive( + Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub struct CurrentAndPreviousValue { + /// The most recently set value. + pub current: u32, + /// The previous value, if one existed. + pub previous: Option, +} + +// Pallet for demonstrating storage migrations. +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// Define the current [`StorageVersion`] of the pallet. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + /// [`StorageVersion`] V1 of [`Value`]. + /// + /// Currently used. 
+ #[docify::export] + #[pallet::storage] + pub type Value = StorageValue<_, CurrentAndPreviousValue>; + + #[docify::export(pallet_calls)] + #[pallet::call] + impl Pallet { + pub fn set_value(origin: OriginFor, value: u32) -> DispatchResult { + ensure_signed(origin)?; + + let previous = Value::::get().map(|v| v.current); + let new_struct = CurrentAndPreviousValue { current: value, previous }; + >::put(new_struct); + + Ok(()) + } + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs new file mode 100644 index 00000000000..80a33f69941 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs @@ -0,0 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Module containing all logic associated with the example migration from +/// [`StorageVersion`](frame_support::traits::StorageVersion) V0 to V1. +pub mod v1; diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs new file mode 100644 index 00000000000..b46640a3202 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + storage_alias, + traits::{Get, OnRuntimeUpgrade}, +}; + +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +/// Collection of storage item formats from the previous storage version. +/// +/// Required so we can read values in the v0 storage format during the migration. +mod v0 { + use super::*; + + /// V0 type for [`crate::Value`]. + #[storage_alias] + pub type Value = StorageValue, u32>; +} + +/// Private module containing *version unchecked* migration logic. +/// +/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) +/// type in this module to create something to export. +/// +/// The unversioned migration should be kept private so the unversioned migration cannot +/// accidentally be used in any runtimes. 
+/// +/// For more about this pattern of keeping items private, see +/// - +/// - +mod version_unchecked { + use super::*; + + /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. + /// + /// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to + /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// + /// In this migration, update the on-chain storage for the pallet to reflect the new storage + /// layout. + pub struct MigrateV0ToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for MigrateV0ToV1 { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `version_unchecked::MigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::::get(); + // Return it as an encoded `Vec` + Ok(old_value.encode()) + } + + /// Migrate the storage from V0 to V1. + /// + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) + } + } + + /// Verifies the storage was migrated correctly. + /// + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a + /// [`crate::CurrentAndPreviousValue`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) + } + } +} + +/// Public module containing *version checked* migration logic. +/// +/// This is the only module that should be exported from this module. +/// +/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about +/// how it works. 
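For orientation before the `versioned` module itself: the behaviour the wrapper adds is roughly the hand-written guard below. This is a simplification with placeholder weight accounting; the real logic (including logging and the try-runtime plumbing) lives in `frame_support::migrations::VersionedMigration`:

```rust
use frame_support::traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion};

/// Rough, illustrative equivalent of wrapping `version_unchecked::MigrateV0ToV1`
/// in `VersionedMigration<0, 1, ...>`; not the actual implementation.
pub struct ConceptualMigrateV0ToV1<T>(sp_std::marker::PhantomData<T>);

impl<T: crate::Config> OnRuntimeUpgrade for ConceptualMigrateV0ToV1<T> {
    fn on_runtime_upgrade() -> frame_support::weights::Weight {
        let on_chain = crate::Pallet::<T>::on_chain_storage_version();
        if on_chain == 0 {
            // Run the unversioned logic, then record the new version on-chain.
            let weight = version_unchecked::MigrateV0ToV1::<T>::on_runtime_upgrade();
            StorageVersion::new(1).put::<crate::Pallet<T>>();
            weight.saturating_add(T::DbWeight::get().reads_writes(1, 1))
        } else {
            // Unexpected starting version: skip the inner migration entirely.
            T::DbWeight::get().reads(1)
        }
    }
}
```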
+pub mod versioned { + use super::*; + + /// `version_unchecked::MigrateV0ToV1` wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: + /// - The migration only runs once when the on-chain storage version is 0 + /// - The on-chain storage version is updated to `1` after the migration executes + /// - Reads/Writes from checking/settings the on-chain storage version are accounted for + pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + version_unchecked::MigrateV0ToV1, + crate::pallet::Pallet, + ::DbWeight, + >; +} + +/// Tests for our migration. +/// +/// When writing migration tests, it is important to check: +/// 1. `on_runtime_upgrade` returns the expected weight +/// 2. `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +/// 3. The storage is in the expected state after the migration +#[cfg(any(all(feature = "try-runtime", test), doc))] +mod test { + use super::*; + use crate::mock::{new_test_ext, MockRuntime}; + use frame_support::assert_ok; + use version_unchecked::MigrateV0ToV1; + + #[test] + fn handles_no_existing_value() { + new_test_ext().execute_with(|| { + // By default, no value should be set. Verify this assumption. + assert!(crate::Value::::get().is_none()); + assert!(v0::Value::::get().is_none()); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + + // The weight should be just 1 read for trying to access the old value. + assert_eq!(weight, ::DbWeight::get().reads(1)); + + // After the migration, no value should have been set. + assert!(crate::Value::::get().is_none()); + }) + } + + #[test] + fn handles_existing_value() { + new_test_ext().execute_with(|| { + // Set up an initial value + let initial_value = 42; + v0::Value::::put(initial_value); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::::post_upgrade(bytes)); + + // The weight used should be 1 read for the old value, and 1 write for the new + // value. + assert_eq!( + weight, + ::DbWeight::get().reads_writes(1, 1) + ); + + // After the migration, the new value should be set as the `current` value. + let expected_new_value = + crate::CurrentAndPreviousValue { current: initial_value, previous: None }; + assert_eq!(crate::Value::::get(), Some(expected_new_value)); + }) + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs new file mode 100644 index 00000000000..02f72e01836 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(any(all(feature = "try-runtime", test), doc))] + +use crate::*; +use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; + +// Re-export crate as its pallet name for construct_runtime. +use crate as pallet_example_storage_migration; + +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub struct MockRuntime { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example_storage_migration::{Pallet, Call, Storage}, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for MockRuntime { + type Block = Block; + type AccountData = pallet_balances::AccountData; + type DbWeight = ParityDbWeight; +} + +impl pallet_balances::Config for MockRuntime { + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +impl Config for MockRuntime {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + let t = RuntimeGenesisConfig { system: Default::default(), balances: Default::default() } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index f38bbe52dc1..dee23a41379 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -43,6 +43,9 @@ //! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be //! built using only the `frame` umbrella crate. //! +//! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for +//! writing storage migrations. +//! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. 
diff --git a/substrate/frame/fast-unstake/src/migrations.rs b/substrate/frame/fast-unstake/src/migrations.rs index 56438840704..97ad86bfff4 100644 --- a/substrate/frame/fast-unstake/src/migrations.rs +++ b/substrate/frame/fast-unstake/src/migrations.rs @@ -33,12 +33,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 0b9f2b35827..90bcd8721df 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -73,7 +73,7 @@ pub mod pallet { use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 1de89dd00c8..f14093aa09a 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -251,7 +251,7 @@ type OffchainResult = Result>>; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/im-online/src/migration.rs b/substrate/frame/im-online/src/migration.rs index 0d2c0a055b6..754a2e672e6 100644 --- a/substrate/frame/im-online/src/migration.rs +++ b/substrate/frame/im-online/src/migration.rs @@ -72,7 +72,7 @@ pub mod v1 { if StorageVersion::get::>() != 0 { log::warn!( target: TARGET, - "Skipping migration because current storage version is not 0" + "Skipping migration because in-code storage version is not 0" ); return weight } diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index f3226397003..f4ac697130d 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -46,7 +46,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index e4426c64b41..a83b78e316f 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -168,7 +168,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/multisig/src/migrations.rs b/substrate/frame/multisig/src/migrations.rs index d03e42a66a5..e6402600d0d 100644 --- a/substrate/frame/multisig/src/migrations.rs +++ b/substrate/frame/multisig/src/migrations.rs @@ -51,7 +51,7 @@ pub mod v1 { fn on_runtime_upgrade() -> Weight { use sp_runtime::Saturating; - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); if onchain > 0 { diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index a7d505e2e39..7cf7cdc61a7 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -76,7 +76,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/nfts/src/migration.rs b/substrate/frame/nfts/src/migration.rs index d4cdf7e820d..8f82e092262 100644 --- a/substrate/frame/nfts/src/migration.rs +++ b/substrate/frame/nfts/src/migration.rs @@ -54,17 +54,17 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); log::info!( target: LOG_TARGET, - "Running migration with current storage version {:?} / onchain {:?}", - current_version, - onchain_version + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code_version, + on_chain_version ); - if onchain_version == 0 && current_version == 1 { + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; let mut configs_iterated = 0u64; Collection::::translate::< @@ -77,13 +77,13 @@ pub mod v1 { Some(old_value.migrate_to_v1(item_configs)) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} records, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + configs_iterated + 1, translated + 1) } else { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 074d59931ad..c64db9ed692 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1576,7 +1576,7 @@ pub mod pallet { use frame_system::{ensure_signed, pallet_prelude::*}; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. 
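The nfts hunk above also shows the `translate` idiom used to rewrite every value of a storage map during a migration. A generic sketch of that idiom, where `MyMap`, `OldValue` and `NewValue` are placeholders assumed to be defined by the pallet being migrated (they are not part of this patch):

```rust
fn translate_map<T: Config>() -> Weight {
    let mut translated = 0u64;
    // Decode each existing value with the old layout and write it back in the
    // new layout; returning `None` from the closure would delete the entry.
    MyMap::<T>::translate::<OldValue, _>(|_key, old: OldValue| {
        translated += 1;
        Some(NewValue { owner: old.owner, attributes: 0 })
    });
    // One read and one write per translated entry.
    T::DbWeight::get().reads_writes(translated, translated)
}
```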
const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 6887fcfa7ec..14410339f59 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -342,25 +342,25 @@ pub mod v5 { pub struct MigrateToV5(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV5 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", - current, + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code, onchain ); - if current == 5 && onchain == 4 { + if in_code == 5 && onchain == 4 { let mut translated = 0u64; RewardPools::::translate::, _>(|_id, old_value| { translated.saturating_inc(); Some(old_value.migrate_to_v5()) }); - current.put::>(); - log!(info, "Upgraded {} pools, storage to version {:?}", translated, current); + in_code.put::>(); + log!(info, "Upgraded {} pools, storage to version {:?}", translated, in_code); // reads: translated + onchain version. // writes: translated + current.put. @@ -498,12 +498,12 @@ pub mod v4 { #[allow(deprecated)] impl> OnRuntimeUpgrade for MigrateToV4 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -579,13 +579,13 @@ pub mod v3 { pub struct MigrateToV3(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV3 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); if onchain == 2 { log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -859,12 +859,12 @@ pub mod v2 { impl OnRuntimeUpgrade for MigrateToV2 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -976,12 +976,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index e344bdfe2d8..4e474685166 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -102,7 +102,7 @@ pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 
1024; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::config] diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index c5bf2266e67..e616056c302 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -143,7 +143,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::EnsureOriginWithArg}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/referenda/src/migration.rs b/substrate/frame/referenda/src/migration.rs index a80897242ee..631eb7340e5 100644 --- a/substrate/frame/referenda/src/migration.rs +++ b/substrate/frame/referenda/src/migration.rs @@ -109,16 +109,16 @@ pub mod v1 { } fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight = T::DbWeight::get().reads(1); log::info!( target: TARGET, - "running migration with current storage version {:?} / onchain {:?}.", - current_version, - onchain_version + "running migration with in-code storage version {:?} / onchain {:?}.", + in_code_version, + on_chain_version ); - if onchain_version != 0 { + if on_chain_version != 0 { log::warn!(target: TARGET, "skipping migration from v0 to v1."); return weight } @@ -149,8 +149,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "must upgrade from version 0 to 1."); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "must upgrade from version 0 to 1."); let pre_referendum_count: u32 = Decode::decode(&mut &state[..]) .expect("failed to decode the state from pre-upgrade."); let post_referendum_count = ReferendumInfoFor::::iter().count() as u32; diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index daebebdee99..af3abd8ac4f 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -229,7 +229,7 @@ pub mod pallet { use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index d74e9dd0b7c..b9cecea1a7f 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -58,7 +58,7 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
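The referenda hunk above also illustrates the try-runtime half of these migrations: `pre_upgrade` encodes a snapshot of state and `post_upgrade` decodes it and checks invariants. Reduced to a sketch, with `SomeMap` as a placeholder storage item and the usual `codec::{Decode, Encode}` and `frame_support::ensure` imports assumed:

```rust
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
    // Record how many entries exist before the migration runs.
    Ok((SomeMap::<T>::iter().count() as u32).encode())
}

#[cfg(feature = "try-runtime")]
fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
    let pre_count: u32 = Decode::decode(&mut &state[..])
        .map_err(|_| TryRuntimeError::Other("failed to decode pre-upgrade state"))?;
    ensure!(
        SomeMap::<T>::iter().count() as u32 == pre_count,
        "migration must not change the number of entries"
    );
    ensure!(
        Pallet::<T>::on_chain_storage_version() == 1,
        "on-chain storage version must be bumped to 1"
    );
    Ok(())
}
```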
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index fc35cd6ddd8..7d8128dbc1f 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index dafb1e0b9e5..6a102911451 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -40,11 +40,11 @@ impl< { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain == 0 && current == 2, "pallet_society: invalid version"); + let in_code = Pallet::::in_code_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + ensure!(on_chain == 0 && in_code == 2, "pallet_society: invalid version"); - Ok((old::Candidates::::get(), old::Members::::get()).encode()) + Ok((v0::Candidates::::get(), v0::Members::::get()).encode()) } fn on_runtime_upgrade() -> Weight { @@ -103,7 +103,7 @@ pub type MigrateToV2 = frame_support::migrations::VersionedMi ::DbWeight, >; -pub(crate) mod old { +pub(crate) mod v0 { use super::*; use frame_support::storage_alias; @@ -230,37 +230,37 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { } // We don't use these - make sure they don't exist. - assert_eq!(old::SuspendedCandidates::::iter().count(), 0); - assert_eq!(old::Strikes::::iter().count(), 0); - assert_eq!(old::Vouching::::iter().count(), 0); - assert!(!old::Defender::::exists()); - assert!(!old::Members::::exists()); + assert_eq!(v0::SuspendedCandidates::::iter().count(), 0); + assert_eq!(v0::Strikes::::iter().count(), 0); + assert_eq!(v0::Vouching::::iter().count(), 0); + assert!(!v0::Defender::::exists()); + assert!(!v0::Members::::exists()); } pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { // Migrate Bids from old::Bids (just a trunctation). - Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(old::Bids::::take())); + Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. RoundCount::::put(0); // Migrate Candidates from old::Candidates - for Bid { who: candidate, kind, value } in old::Candidates::::take().into_iter() { + for Bid { who: candidate, kind, value } in v0::Candidates::::take().into_iter() { let mut tally = Tally::default(); // Migrate Votes from old::Votes // No need to drain, since we're overwriting values. 
- for (voter, vote) in old::Votes::::iter_prefix(&candidate) { + for (voter, vote) in v0::Votes::::iter_prefix(&candidate) { Votes::::insert( &candidate, &voter, - Vote { approve: vote == old::Vote::Approve, weight: 1 }, + Vote { approve: vote == v0::Vote::Approve, weight: 1 }, ); match vote { - old::Vote::Approve => tally.approvals.saturating_inc(), - old::Vote::Reject => tally.rejections.saturating_inc(), - old::Vote::Skeptic => Skeptic::::put(&voter), + v0::Vote::Approve => tally.approvals.saturating_inc(), + v0::Vote::Reject => tally.rejections.saturating_inc(), + v0::Vote::Skeptic => Skeptic::::put(&voter), } } Candidates::::insert( @@ -271,9 +271,9 @@ pub fn from_original, I: Instance + 'static>( // Migrate Members from old::Members old::Strikes old::Vouching let mut member_count = 0; - for member in old::Members::::take() { - let strikes = old::Strikes::::take(&member); - let vouching = old::Vouching::::take(&member); + for member in v0::Members::::take() { + let strikes = v0::Strikes::::take(&member); + let vouching = v0::Vouching::::take(&member); let record = MemberRecord { index: member_count, rank: 0, strikes, vouching }; Members::::insert(&member, record); MemberByIndex::::insert(member_count, &member); @@ -314,7 +314,7 @@ pub fn from_original, I: Instance + 'static>( // Migrate Payouts from: old::Payouts and raw info (needed since we can't query old chain // state). past_payouts.sort(); - for (who, mut payouts) in old::Payouts::::iter() { + for (who, mut payouts) in v0::Payouts::::iter() { payouts.truncate(T::MaxPayouts::get() as usize); // ^^ Safe since we already truncated. let paid = past_payouts @@ -329,19 +329,19 @@ pub fn from_original, I: Instance + 'static>( } // Migrate SuspendedMembers from old::SuspendedMembers old::Strikes old::Vouching. - for who in old::SuspendedMembers::::iter_keys() { - let strikes = old::Strikes::::take(&who); - let vouching = old::Vouching::::take(&who); + for who in v0::SuspendedMembers::::iter_keys() { + let strikes = v0::Strikes::::take(&who); + let vouching = v0::Vouching::::take(&who); let record = MemberRecord { index: 0, rank: 0, strikes, vouching }; SuspendedMembers::::insert(&who, record); } // Any suspended candidates remaining are rejected. - let _ = old::SuspendedCandidates::::clear(u32::MAX, None); + let _ = v0::SuspendedCandidates::::clear(u32::MAX, None); // We give the current defender the benefit of the doubt. - old::Defender::::kill(); - let _ = old::DefenderVotes::::clear(u32::MAX, None); + v0::Defender::::kill(); + let _ = v0::DefenderVotes::::clear(u32::MAX, None); Ok(T::BlockWeights::get().max_block) } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 940643168fb..411567e1ded 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. use super::*; -use migrations::old; +use migrations::v0; use mock::*; use frame_support::{assert_noop, assert_ok}; @@ -32,41 +32,41 @@ use RuntimeOrigin as Origin; #[test] fn migration_works() { EnvBuilder::new().founded(false).execute(|| { - use old::Vote::*; + use v0::Vote::*; // Initialise the old storage items. 
Founder::::put(10); Head::::put(30); - old::Members::::put(vec![10, 20, 30]); - old::Vouching::::insert(30, Vouching); - old::Vouching::::insert(40, Banned); - old::Strikes::::insert(20, 1); - old::Strikes::::insert(30, 2); - old::Strikes::::insert(40, 5); - old::Payouts::::insert(20, vec![(1, 1)]); - old::Payouts::::insert( + v0::Members::::put(vec![10, 20, 30]); + v0::Vouching::::insert(30, Vouching); + v0::Vouching::::insert(40, Banned); + v0::Strikes::::insert(20, 1); + v0::Strikes::::insert(30, 2); + v0::Strikes::::insert(40, 5); + v0::Payouts::::insert(20, vec![(1, 1)]); + v0::Payouts::::insert( 30, (0..=::MaxPayouts::get()) .map(|i| (i as u64, i as u64)) .collect::>(), ); - old::SuspendedMembers::::insert(40, true); + v0::SuspendedMembers::::insert(40, true); - old::Defender::::put(20); - old::DefenderVotes::::insert(10, Approve); - old::DefenderVotes::::insert(20, Approve); - old::DefenderVotes::::insert(30, Reject); + v0::Defender::::put(20); + v0::DefenderVotes::::insert(10, Approve); + v0::DefenderVotes::::insert(20, Approve); + v0::DefenderVotes::::insert(30, Reject); - old::SuspendedCandidates::::insert(50, (10, Deposit(100))); + v0::SuspendedCandidates::::insert(50, (10, Deposit(100))); - old::Candidates::::put(vec![ + v0::Candidates::::put(vec![ Bid { who: 60, kind: Deposit(100), value: 200 }, Bid { who: 70, kind: Vouch(30, 30), value: 100 }, ]); - old::Votes::::insert(60, 10, Approve); - old::Votes::::insert(70, 10, Reject); - old::Votes::::insert(70, 20, Approve); - old::Votes::::insert(70, 30, Approve); + v0::Votes::::insert(60, 10, Approve); + v0::Votes::::insert(70, 10, Reject); + v0::Votes::::insert(70, 20, Approve); + v0::Votes::::insert(70, 30, Approve); let bids = (0..=::MaxBids::get()) .map(|i| Bid { @@ -75,7 +75,7 @@ fn migration_works() { value: 10u64 + i as u64, }) .collect::>(); - old::Bids::::put(bids); + v0::Bids::::put(bids); migrations::from_original::(&mut [][..]).expect("migration failed"); migrations::assert_internal_consistency::(); diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 98984be9920..3e3b1113a81 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -67,11 +67,11 @@ pub mod v14 { pub struct MigrateToV14(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV14 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let on_chain = Pallet::::on_chain_storage_version(); - if current == 14 && on_chain == 13 { - current.put::>(); + if in_code == 14 && on_chain == 13 { + in_code.put::>(); log!(info, "v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) @@ -108,12 +108,12 @@ pub mod v13 { } fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let onchain = StorageVersion::::get(); - if current == 13 && onchain == ObsoleteReleases::V12_0_0 { + if in_code == 13 && onchain == ObsoleteReleases::V12_0_0 { StorageVersion::::kill(); - current.put::>(); + in_code.put::>(); log!(info, "v13 applied successfully"); T::DbWeight::get().reads_writes(1, 2) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index e0213efd507..992a7fe2c9c 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -66,7 +66,7 @@ pub mod pallet { use super::*; - /// The current storage 
version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 78729ee3dc3..5840377d722 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -555,6 +555,42 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { tt_macro::create_tt_return_macro(input) } +/// Allows accessing on-chain pallet storage that is no longer accessible via the pallet. +/// +/// This is especially useful when writing storage migrations, when types of storage items are +/// modified or outright removed, but the previous definition is required to perform the migration. +/// +/// ## Example +/// +/// Imagine a pallet with the following storage definition: +/// ```ignore +/// #[pallet::storage] +/// pub type Value = StorageValue<_, u32>; +/// ``` +/// `Value` can be accessed by calling `Value::::get()`. +/// +/// Now imagine the definition of `Value` is updated to a `(u32, u32)`: +/// ```ignore +/// #[pallet::storage] +/// pub type Value = StorageValue<_, (u32, u32)>; +/// ``` +/// The on-chain value of `Value` is `u32`, but `Value::::get()` expects it to be `(u32, u32)`. +/// +/// In this instance the developer must write a storage migration to reading the old value of +/// `Value` and writing it back to storage in the new format, so that the on-chain storage layout is +/// consistent with what is defined in the pallet. +/// +/// We can read the old v0 value of `Value` in the migration by creating a `storage_alias`: +/// ```ignore +/// pub(crate) mod v0 { +/// use super::*; +/// +/// #[storage_alias] +/// pub type Value = StorageValue, u32>; +/// } +/// ``` +/// +/// The developer can now access the old value of `Value` by calling `v0::Value::::get()`. #[proc_macro_attribute] pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { storage_alias::storage_alias(attributes.into(), input.into()) @@ -1058,7 +1094,7 @@ pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { /// pub struct Pallet(_); /// ``` /// -/// If not present, the current storage version is set to the default value. +/// If not present, the in-code storage version is set to the default value. #[proc_macro_attribute] pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 5044d4285bb..6b25ddcba1a 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -42,7 +42,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::name::().unwrap_or("") }; - let initialize_on_chain_storage_version = if let Some(current_version) = + let initialize_on_chain_storage_version = if let Some(in_code_version) = &def.pallet_struct.storage_version { quote::quote! { @@ -50,9 +50,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { target: #frame_support::LOG_TARGET, "πŸ₯ New pallet {:?} detected in the runtime. Initializing the on-chain storage version to match the storage version defined in the pallet: {:?}", #pallet_name, - #current_version + #in_code_version ); - #current_version.put::(); + #in_code_version.put::(); } } else { quote::quote! 
{ @@ -73,10 +73,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::info!( target: #frame_support::LOG_TARGET, "⚠️ {} declares internal migrations (which *might* execute). \ - On-chain `{:?}` vs current storage version `{:?}`", + On-chain `{:?}` vs in-code storage version `{:?}`", #pallet_name, ::on_chain_storage_version(), - ::current_storage_version(), + ::in_code_storage_version(), ); } } else { @@ -102,23 +102,23 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { }; // If a storage version is set, we should ensure that the storage version on chain matches the - // current storage version. This assumes that `Executive` is running custom migrations before + // in-code storage version. This assumes that `Executive` is running custom migrations before // the pallets are called. let post_storage_version_check = if def.pallet_struct.storage_version.is_some() { quote::quote! { let on_chain_version = ::on_chain_storage_version(); - let current_version = ::current_storage_version(); + let in_code_version = ::in_code_storage_version(); - if on_chain_version != current_version { + if on_chain_version != in_code_version { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, - "{}: On chain storage version {:?} doesn't match current storage version {:?}.", + "{}: On chain storage version {:?} doesn't match in-code storage version {:?}.", #pallet_name, on_chain_version, - current_version, + in_code_version, ); - return Err("On chain and current storage version do not match. Missing runtime upgrade?".into()); + return Err("On chain and in-code storage version do not match. Missing runtime upgrade?".into()); } } } else { diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c2102f0284d..7cdf6bde9de 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -160,7 +160,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } ); - let (storage_version, current_storage_version_ty) = + let (storage_version, in_code_storage_version_ty) = if let Some(v) = def.pallet_struct.storage_version.as_ref() { (quote::quote! { #v }, quote::quote! { #frame_support::traits::StorageVersion }) } else { @@ -203,9 +203,9 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #config_where_clause { - type CurrentStorageVersion = #current_storage_version_ty; + type InCodeStorageVersion = #in_code_storage_version_ty; - fn current_storage_version() -> Self::CurrentStorageVersion { + fn in_code_storage_version() -> Self::InCodeStorageVersion { #storage_version } diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index f4af86aa3e9..c2855ae38ef 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -44,7 +44,7 @@ pub struct PalletStructDef { /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. /// Contains the span of the attribute. pub without_storage_info: Option, - /// The current storage version of the pallet. + /// The in-code storage version of the pallet. 
pub storage_version: Option, } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 3e97a384275..d2d9a33053f 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -1178,7 +1178,7 @@ pub mod pallet_prelude { /// # `pallet::storage_version` /// /// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the current storage version needs to be +/// implements [`traits::GetStorageVersion`], the in-code storage version needs to be /// communicated to the macro. This can be done by using the `pallet::storage_version` /// attribute: /// @@ -1190,7 +1190,7 @@ pub mod pallet_prelude { /// pub struct Pallet(_); /// ``` /// -/// If not present, the current storage version is set to the default value. +/// If not present, the in-code storage version is set to the default value. /// /// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) /// diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index d059a992a86..f7541059023 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -43,12 +43,30 @@ use sp_std::marker::PhantomData; /// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should /// probably be removed. /// +/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and +/// only export the versioned migration logic to prevent accidentally using the unversioned +/// migration in any runtimes. +/// /// ### Examples /// ```ignore /// // In file defining migrations -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// +/// /// Private module containing *version unchecked* migration logic. +/// /// +/// /// Should only be used by the [`VersionedMigration`] type in this module to create something to +/// /// export. +/// /// +/// /// We keep this private so the unversioned migration cannot accidentally be used in any runtimes. +/// /// +/// /// For more about this pattern of keeping items private, see +/// /// - https://github.com/rust-lang/rust/issues/30905 +/// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 +/// mod version_unchecked { +/// use super::*; +/// pub struct MigrateV5ToV6(sp_std::marker::PhantomData); +/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { +/// // OnRuntimeUpgrade implementation... +/// } /// } /// /// pub type MigrateV5ToV6 = @@ -91,7 +109,7 @@ impl< const FROM: u16, const TO: u16, Inner: crate::traits::OnRuntimeUpgrade, - Pallet: GetStorageVersion + PalletInfoAccess, + Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration { @@ -163,25 +181,25 @@ impl< } } -/// Can store the current pallet version in storage. -pub trait StoreCurrentStorageVersion { - /// Write the current storage version to the storage. - fn store_current_storage_version(); +/// Can store the in-code pallet version on-chain. +pub trait StoreInCodeStorageVersion { + /// Write the in-code storage version on-chain. 
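/// Illustrative note (not part of this patch): for a pallet `P` that declares a storage version
/// this boils down to
/// ```ignore
/// P::in_code_storage_version().put::<P>();
/// ```
/// while a pallet without a declared version falls back to `StorageVersion::default()`, as the
/// two trait implementations below show.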
+ fn store_in_code_storage_version(); } -impl + PalletInfoAccess> - StoreCurrentStorageVersion for StorageVersion +impl + PalletInfoAccess> + StoreInCodeStorageVersion for StorageVersion { - fn store_current_storage_version() { - let version = ::current_storage_version(); + fn store_in_code_storage_version() { + let version = ::in_code_storage_version(); version.put::(); } } -impl + PalletInfoAccess> - StoreCurrentStorageVersion for NoStorageVersionSet +impl + PalletInfoAccess> + StoreInCodeStorageVersion for NoStorageVersionSet { - fn store_current_storage_version() { + fn store_in_code_storage_version() { StorageVersion::default().put::(); } } @@ -193,7 +211,7 @@ pub trait PalletVersionToStorageVersionHelper { impl PalletVersionToStorageVersionHelper for T where - T::CurrentStorageVersion: StoreCurrentStorageVersion, + T::InCodeStorageVersion: StoreInCodeStorageVersion, { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; @@ -204,8 +222,7 @@ where sp_io::storage::clear(&pallet_version_key(::name())); - >::store_current_storage_version( - ); + >::store_in_code_storage_version(); db_weight.writes(2) } @@ -226,7 +243,7 @@ impl PalletVersionToStorageVersionHelper for T { /// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct. /// -/// This will remove all `PalletVersion's` from the state and insert the current storage version. +/// This will remove all `PalletVersion's` from the state and insert the in-code storage version. pub fn migrate_from_pallet_version_to_storage_version< Pallets: PalletVersionToStorageVersionHelper, >( diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 20788ce932d..72f047c3a73 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -90,7 +90,7 @@ impl OnIdle for Tuple { /// /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and -/// will see the genesis state after all pallets have written their genesis state. +/// will set the genesis state after all pallets have written their genesis state. #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] @@ -306,19 +306,23 @@ pub trait IntegrityTest { /// end /// ``` /// -/// * `OnRuntimeUpgrade` is only executed before everything else if a code -/// * `OnRuntimeUpgrade` is mandatorily at the beginning of the block body (extrinsics) being -/// processed. change is detected. -/// * Extrinsics start with inherents, and continue with other signed or unsigned extrinsics. -/// * `OnIdle` optionally comes after extrinsics. -/// `OnFinalize` mandatorily comes after `OnIdle`. +/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are only executed when a code change is +/// detected. +/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are mandatorily executed at the very +/// beginning of the block body, before any extrinsics are processed. +/// * [`Inherents`](sp_inherents) are always executed before any other other signed or unsigned +/// extrinsics. +/// * [`OnIdle`](Hooks::OnIdle) hooks are executed after extrinsics if there is weight remaining in +/// the block. 
+/// * [`OnFinalize`](Hooks::OnFinalize) hooks are mandatorily executed after +/// [`OnIdle`](Hooks::OnIdle). /// -/// > `OffchainWorker` is not part of this flow, as it is not really part of the consensus/main -/// > block import path, and is called optionally, and in other circumstances. See -/// > [`crate::traits::misc::OffchainWorker`] for more information. +/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) hooks are not part of this flow, +/// > because they are not part of the consensus/main block building logic. See +/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) for more information. /// -/// To learn more about the execution of hooks see `frame-executive` as this component is is charge -/// of dispatching extrinsics and placing the hooks in the correct order. +/// To learn more about the execution of hooks see the FRAME `Executive` pallet which is in charge +/// of dispatching extrinsics and calling hooks in the correct order. pub trait Hooks { /// Block initialization hook. This is called at the very beginning of block execution. /// @@ -370,30 +374,38 @@ pub trait Hooks { Weight::zero() } - /// Hook executed when a code change (aka. a "runtime upgrade") is detected by FRAME. + /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME + /// `Executive` pallet. /// /// Be aware that this is called before [`Hooks::on_initialize`] of any pallet; therefore, a lot /// of the critical storage items such as `block_number` in system pallet might have not been - /// set. + /// set yet. /// - /// Vert similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST - /// execute. Use with care. + /// Similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST execute. + /// It is strongly recommended to dry-run the execution of these hooks using + /// [try-runtime-cli](https://github.com/paritytech/try-runtime-cli) to ensure they will not + /// produce and overweight block which can brick your chain. Use with care! /// - /// ## Implementation Note: Versioning + /// ## Implementation Note: Standalone Migrations /// - /// 1. An implementation of this should typically follow a pattern where the version of the - /// pallet is checked against the onchain version, and a decision is made about what needs to be - /// done. This is helpful to prevent accidental repetitive execution of this hook, which can be - /// catastrophic. + /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on + /// structs and passing them to `Executive`. /// - /// Alternatively, [`frame_support::migrations::VersionedMigration`] can be used to assist with - /// this. + /// ## Implementation Note: Pallet Versioning /// - /// ## Implementation Note: Runtime Level Migration + /// Implementations of this hook are typically wrapped in + /// [`crate::migrations::VersionedMigration`] to ensure the migration is executed exactly + /// once and only when it is supposed to. /// - /// Additional "upgrade hooks" can be created by pallets by a manual implementation of - /// [`Hooks::on_runtime_upgrade`] which can be passed on to `Executive` at the top level - /// runtime. + /// Alternatively, developers can manually implement version checks. + /// + /// Failure to adequately check storage versions can result in accidental repetitive execution + /// of the hook, which can be catastrophic. 
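/// A minimal sketch of such a manual check (not part of this patch; the target version `2` and
/// the weights are placeholders). It gates the migration on the on-chain version and bumps it
/// afterwards, so accidentally re-running the hook becomes a no-op:
/// ```ignore
/// fn on_runtime_upgrade() -> Weight {
///     if Self::on_chain_storage_version() < StorageVersion::new(2) {
///         // ... transform the affected storage items here ...
///         StorageVersion::new(2).put::<Self>();
///         return T::DbWeight::get().reads_writes(1, 1)
///     }
///     T::DbWeight::get().reads(1)
/// }
/// ```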
+ /// + /// ## Implementation Note: Weight + /// + /// Typically, implementations of this method are simple enough that weights can be calculated + /// manually. However, if required, a benchmark can also be used. fn on_runtime_upgrade() -> Weight { Weight::zero() } @@ -403,7 +415,7 @@ pub trait Hooks { /// It should focus on certain checks to ensure that the state is sensible. This is never /// executed in a consensus code-path, therefore it can consume as much weight as it needs. /// - /// This hook should not alter any storage. + /// This hook must not alter any storage. #[cfg(feature = "try-runtime")] fn try_state(_n: BlockNumber) -> Result<(), TryRuntimeError> { Ok(()) @@ -415,7 +427,7 @@ pub trait Hooks { /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector /// should be returned if there is no such need. /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + /// This hook is never executed on-chain but instead used by testing tools. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index 586af20511a..8bda4186bc9 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -261,41 +261,64 @@ impl Add for StorageVersion { } } -/// Special marker struct if no storage version is set for a pallet. +/// Special marker struct used when [`storage_version`](crate::pallet_macros::storage_version) is +/// not defined for a pallet. /// /// If you (the reader) end up here, it probably means that you tried to compare /// [`GetStorageVersion::on_chain_storage_version`] against -/// [`GetStorageVersion::current_storage_version`]. This basically means that the -/// [`storage_version`](crate::pallet_macros::storage_version) is missing in the pallet where the -/// mentioned functions are being called. +/// [`GetStorageVersion::in_code_storage_version`]. This basically means that the +/// [`storage_version`](crate::pallet_macros::storage_version) is missing from the pallet where the +/// mentioned functions are being called, and needs to be defined. #[derive(Debug, Default)] pub struct NoStorageVersionSet; -/// Provides information about the storage version of a pallet. +/// Provides information about a pallet's storage versions. /// -/// It differentiates between current and on-chain storage version. Both should be only out of sync -/// when a new runtime upgrade was applied and the runtime migrations did not yet executed. -/// Otherwise it means that the pallet works with an unsupported storage version and unforeseen -/// stuff can happen. +/// Every pallet has two storage versions: +/// 1. An in-code storage version +/// 2. An on-chain storage version /// -/// The current storage version is the version of the pallet as supported at runtime. The active -/// storage version is the version of the pallet in the storage. +/// The in-code storage version is the version of the pallet as defined in the runtime blob, and the +/// on-chain storage version is the version of the pallet stored on-chain. /// -/// It is required to update the on-chain storage version manually when a migration was applied. 
+/// Storage versions should be only ever be out of sync when a pallet has been updated to a new +/// version and the in-code version is incremented, but the migration has not yet been executed +/// on-chain as part of a runtime upgrade. +/// +/// It is the responsibility of the developer to ensure that the on-chain storage version is set +/// correctly during a migration so that it matches the in-code storage version. pub trait GetStorageVersion { - /// This will be filled out by the [`pallet`](crate::pallet) macro. + /// This type is generated by the [`pallet`](crate::pallet) macro. + /// + /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't specified, + /// this is set to [`NoStorageVersionSet`] to signify that it is missing. + /// + /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute is specified, + /// this is be set to a [`StorageVersion`] corresponding to the attribute. /// - /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't given - /// this is set to [`NoStorageVersionSet`] to inform the user that the attribute is missing. - /// This should prevent that the user forgets to set a storage version when required. However, - /// this will only work when the user actually tries to call [`Self::current_storage_version`] - /// to compare it against the [`Self::on_chain_storage_version`]. If the attribute is given, - /// this will be set to [`StorageVersion`]. - type CurrentStorageVersion; - - /// Returns the current storage version as supported by the pallet. - fn current_storage_version() -> Self::CurrentStorageVersion; - /// Returns the on-chain storage version of the pallet as stored in the storage. + /// The intention of using [`NoStorageVersionSet`] instead of defaulting to a [`StorageVersion`] + /// of zero is to prevent developers from forgetting to set + /// [`storage_version`](crate::pallet_macros::storage_version) when it is required, like in the + /// case that they wish to compare the in-code storage version to the on-chain storage version. + type InCodeStorageVersion; + + #[deprecated( + note = "This method has been renamed to `in_code_storage_version` and will be removed after March 2024." + )] + /// DEPRECATED: Use [`Self::current_storage_version`] instead. + /// + /// Returns the in-code storage version as specified in the + /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or + /// [`NoStorageVersionSet`] if the attribute is missing. + fn current_storage_version() -> Self::InCodeStorageVersion { + Self::in_code_storage_version() + } + + /// Returns the in-code storage version as specified in the + /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or + /// [`NoStorageVersionSet`] if the attribute is missing. + fn in_code_storage_version() -> Self::InCodeStorageVersion; + /// Returns the storage version of the pallet as last set in the actual on-chain storage. 
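/// Illustrative sketch (not from the patch itself): existing callers migrate off the deprecated
/// getter with a plain rename, e.g.
/// ```ignore
/// // Deprecated spelling, still compiles with a warning:
/// let version = Pallet::<T>::current_storage_version();
/// // New spelling:
/// let version = Pallet::<T>::in_code_storage_version();
/// ```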
fn on_chain_storage_version() -> StorageVersion; } diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index b8341b25cb0..b7698681886 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -683,7 +683,7 @@ fn test_metadata() { name: "Version", ty: meta_type::(), value: RuntimeVersion::default().encode(), - docs: maybe_docs(vec![ " Get the chain's current version."]), + docs: maybe_docs(vec![ " Get the chain's in-code version."]), }, PalletConstantMetadata { name: "SS58Prefix", diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 9b4381c2f82..7bee4b7c820 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -590,7 +590,7 @@ pub mod pallet2 { Self::deposit_event(Event::Something(31)); if UpdateStorageVersion::get() { - Self::current_storage_version().put::(); + Self::in_code_storage_version().put::(); } Weight::zero() @@ -1310,7 +1310,7 @@ fn pallet_on_genesis() { assert_eq!(pallet::Pallet::::on_chain_storage_version(), StorageVersion::new(0)); pallet::Pallet::::on_genesis(); assert_eq!( - pallet::Pallet::::current_storage_version(), + pallet::Pallet::::in_code_storage_version(), pallet::Pallet::::on_chain_storage_version(), ); }) @@ -2257,10 +2257,10 @@ fn pallet_on_chain_storage_version_initializes_correctly() { AllPalletsWithSystem, >; - // Simple example of a pallet with current version 10 being added to the runtime for the first + // Simple example of a pallet with in-code version 10 being added to the runtime for the first // time. TestExternalities::default().execute_with(|| { - let current_version = Example::current_storage_version(); + let in_code_version = Example::in_code_storage_version(); // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example::name().as_bytes()); @@ -2271,14 +2271,14 @@ fn pallet_on_chain_storage_version_initializes_correctly() { // version. Executive::execute_on_runtime_upgrade(); - // Check that the storage version was initialized to the current version + // Check that the storage version was initialized to the in-code version let on_chain_version_after = StorageVersion::get::(); - assert_eq!(on_chain_version_after, current_version); + assert_eq!(on_chain_version_after, in_code_version); }); - // Pallet with no current storage version should have the on-chain version initialized to 0. + // Pallet with no in-code storage version should have the on-chain version initialized to 0. TestExternalities::default().execute_with(|| { - // Example4 current_storage_version is NoStorageVersionSet. + // Example4 in_code_storage_version is NoStorageVersionSet. // Check the pallet has no storage items set. 
let pallet_hashed_prefix = twox_128(Example4::name().as_bytes()); @@ -2308,7 +2308,7 @@ fn post_runtime_upgrade_detects_storage_version_issues() { impl OnRuntimeUpgrade for CustomUpgrade { fn on_runtime_upgrade() -> Weight { - Example2::current_storage_version().put::(); + Example2::in_code_storage_version().put::(); Default::default() } @@ -2351,14 +2351,14 @@ fn post_runtime_upgrade_detects_storage_version_issues() { >; TestExternalities::default().execute_with(|| { - // Set the on-chain version to one less than the current version for `Example`, simulating a + // Set the on-chain version to one less than the in-code version for `Example`, simulating a // forgotten migration StorageVersion::new(9).put::(); // The version isn't changed, we should detect it. assert!( Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == - "On chain and current storage version do not match. Missing runtime upgrade?" + "On chain and in-code storage version do not match. Missing runtime upgrade?" .into() ); }); diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs index 840a6dee20c..d2ca9fc8099 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs @@ -29,7 +29,7 @@ mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if Self::current_storage_version() != Self::on_chain_storage_version() { + if Self::in_code_storage_version() != Self::on_chain_storage_version() { } diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 1b48197cc9e..3256e69528a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -1,7 +1,7 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionSet` --> tests/pallet_ui/compare_unset_storage_version.rs:32:39 | -32 | if Self::current_storage_version() != Self::on_chain_storage_version() { +32 | if Self::in_code_storage_version() != Self::on_chain_storage_version() { | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index c527a483d88..01df09106d9 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -525,7 +525,7 @@ pub mod pallet { #[pallet::constant] type DbWeight: Get; - /// Get the chain's current version. + /// Get the chain's in-code version. #[pallet::constant] type Version: Get; diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 4c7cfc3028a..8c360fb57d7 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] -- GitLab From f1b2189e8312821a9f8a79dc4f0183d314273faa Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:43:58 +0200 Subject: [PATCH 14/86] rpc-v2/tx/tests: Add transaction broadcast tests and check propagated tx status (#3193) This PR adds tests for the `transaction_broadcast` method. The testing needs to coordinate the following components: - The `TestApi` marks transactions as invalid and implements `ChainApi::validate_transaction` - this is what dictates if a transaction is valid or not and is called from within the `BasicPool` - The `BasicPool` which maintains the transactions and implements `submit_and_watch` needed by the tx broadcast to submit the transaction - The status of the transaction pool is exposed by mocking the BasicPool - The `ChainHeadMockClient` which mocks the `BlockchainEvents::import_notification_stream` needed by the tx broadcast to know to which blocks the transaction is submitted The following changes have been added to the substrate testing to accommodate this: - `TestApi` gets ` remove_invalid`, counterpart to `add_invalid` to ensure an invalid transaction can become valid again; as well as a priority setter for extrinsics - `BasicPool` test constructor is extended with options for the `PoolRotator` - this mechanism is needed because transactions are banned for 30mins (default) after they are declared invalid - testing bypasses this by providing a `Duration::ZERO` ### Testing Scenarios - Capture the status of the transaction as it is normally broadcasted - `transaction_stop` is valid while the transaction is in progress - A future transaction is handled when the dependencies are completed - Try to resubmit the transaction at a later block (currently invalid) - An invalid transaction status is propagated; the transaction is marked as temporarily banned; then the ban expires and transaction is resubmitted This builds on top of: https://github.com/paritytech/polkadot-sdk/pull/3079 Part of: https://github.com/paritytech/polkadot-sdk/issues/3084 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../rpc-spec-v2/src/chain_head/test_utils.rs | 4 +- .../rpc-spec-v2/src/transaction/tests.rs | 238 -------- .../src/transaction/tests/executor.rs | 100 ++++ .../src/transaction/tests/middleware_pool.rs | 187 +++++++ .../rpc-spec-v2/src/transaction/tests/mod.rs | 24 + .../src/transaction/tests/setup.rs | 120 ++++ .../tests/transaction_broadcast_tests.rs | 523 ++++++++++++++++++ substrate/client/transaction-pool/src/lib.rs | 3 +- .../client/transaction-pool/tests/pool.rs | 3 +- .../runtime/transaction-pool/src/lib.rs | 27 +- 10 files changed, 985 insertions(+), 244 deletions(-) delete mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests.rs create mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs create mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs create mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs create mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs create mode 100644 substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index d63a98a5cb0..e81bd4bfa0b 100644 --- 
a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -63,7 +63,7 @@ impl ChainHeadMockClient { BlockImportNotification::new(header.hash(), BlockOrigin::Own, header, true, None, sink); for sink in self.import_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } @@ -83,7 +83,7 @@ impl ChainHeadMockClient { let notification = FinalityNotification::from_summary(summary, sink); for sink in self.finality_sinks.lock().iter_mut() { - sink.unbounded_send(notification.clone()).unwrap(); + let _ = sink.unbounded_send(notification.clone()); } } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests.rs deleted file mode 100644 index 382f5adeae1..00000000000 --- a/substrate/client/rpc-spec-v2/src/transaction/tests.rs +++ /dev/null @@ -1,238 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::*; -use crate::{ - chain_head::test_utils::ChainHeadMockClient, hex_string, - transaction::TransactionBroadcast as RpcTransactionBroadcast, -}; -use assert_matches::assert_matches; -use codec::Encode; -use futures::Future; -use jsonrpsee::{rpc_params, MethodsError as Error, RpcModule}; -use sc_transaction_pool::*; -use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; -use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; -use std::{pin::Pin, sync::Arc, time::Duration}; -use substrate_test_runtime_client::{prelude::*, AccountKeyring::*, Client}; -use substrate_test_runtime_transaction_pool::{uxt, TestApi}; -use tokio::sync::mpsc; - -type Block = substrate_test_runtime_client::runtime::Block; - -/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. -#[derive(Clone)] -struct TaskExecutorBroadcast { - executor: TaskExecutor, - sender: mpsc::UnboundedSender<()>, -} - -/// The channel that receives events when the broadcast futures are dropped. -type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; - -impl TaskExecutorBroadcast { - /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures - /// are dropped. 
- fn new() -> (Self, TaskExecutorRecv) { - let (sender, recv) = mpsc::unbounded_channel(); - - (Self { executor: TaskExecutor::new(), sender }, recv) - } -} - -impl SpawnNamed for TaskExecutorBroadcast { - fn spawn( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn(name, group, future) - } - - fn spawn_blocking( - &self, - name: &'static str, - group: Option<&'static str>, - future: futures::future::BoxFuture<'static, ()>, - ) { - let sender = self.sender.clone(); - let future = Box::pin(async move { - future.await; - let _ = sender.send(()); - }); - - self.executor.spawn_blocking(name, group, future) - } -} - -/// Initial Alice account nonce. -const ALICE_NONCE: u64 = 209; - -fn create_basic_pool_with_genesis( - test_api: Arc, -) -> (BasicPool, Pin + Send>>) { - let genesis_hash = { - test_api - .chain() - .read() - .block_by_number - .get(&0) - .map(|blocks| blocks[0].0.header.hash()) - .expect("there is block 0. qed") - }; - BasicPool::new_test(test_api, genesis_hash, genesis_hash) -} - -fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { - let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE)); - let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); - - let thread_pool = futures::executor::ThreadPool::new().unwrap(); - thread_pool.spawn_ok(background_task); - (pool, api, thread_pool) -} - -fn setup_api() -> ( - Arc, - Arc>, - Arc>>, - RpcModule< - TransactionBroadcast, ChainHeadMockClient>>, - >, - TaskExecutorRecv, -) { - let (pool, api, _) = maintained_pool(); - let pool = Arc::new(pool); - - let builder = TestClientBuilder::new(); - let client = Arc::new(builder.build()); - let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); - - let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); - - let tx_api = - RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) - .into_rpc(); - - (api, pool, client_mock, tx_api, executor_recv) -} - -#[tokio::test] -async fn tx_broadcast_enters_pool() { - let (api, pool, client_mock, tx_api, _) = setup_api(); - - // Start at block 1. - let block_1_header = api.push_block(1, vec![], true); - - let uxt = uxt(Alice, ALICE_NONCE); - let xt = hex_string(&uxt.encode()); - - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - // Announce block 1 to `transaction_unstable_broadcast`. - client_mock.trigger_import_stream(block_1_header).await; - - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. - - // TODO: Improve testability by extending the `transaction_unstable_broadcast` with - // a middleware trait that intercepts the transaction status for testing. - let mut num_retries = 12; - while num_retries > 0 && pool.status().ready != 1 { - tokio::time::sleep(Duration::from_secs(5)).await; - num_retries -= 1; - } - assert_eq!(1, pool.status().ready); - assert_eq!(uxt.encode().len(), pool.status().ready_bytes); - - // Import block 2 with the transaction included. - let block_2_header = api.push_block(2, vec![uxt.clone()], true); - let block_2 = block_2_header.hash(); - - // Announce block 2 to the pool. 
- let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; - pool.maintain(event).await; - - assert_eq!(0, pool.status().ready); - - // Stop call can still be made. - let _: () = tx_api - .call("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap(); -} - -#[tokio::test] -async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, mut exec_recv) = setup_api(); - - // Invalid parameters. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" - ); - - assert_eq!(0, pool.status().ready); - - // Invalid transaction that cannot be decoded. The broadcast silently exits. - let xt = "0xdeadbeef"; - let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); - - assert_eq!(0, pool.status().ready); - - // Await the broadcast future to exit. - // Without this we'd be subject to races, where we try to call the stop before the tx is - // dropped. - exec_recv.recv().await.unwrap(); - - // The broadcast future was dropped, and the operation is no longer active. - // When the operation is not active, either from the tx being finalized or a - // terminal error; the stop method should return an error. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} - -#[tokio::test] -async fn tx_invalid_stop() { - let (_, _, _, tx_api, _) = setup_api(); - - // Make an invalid stop call. - let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) - .await - .unwrap_err(); - assert_matches!(err, - Error::JsonRpc(err) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" - ); -} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs new file mode 100644 index 00000000000..ff9aca79887 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/executor.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; +use std::sync::{atomic::AtomicUsize, Arc}; +use tokio::sync::mpsc; + +/// Wrap the `TaskExecutor` to know when the broadcast future is dropped. 
+#[derive(Clone)] +pub struct TaskExecutorBroadcast { + executor: TaskExecutor, + sender: mpsc::UnboundedSender<()>, + num_tasks: Arc, +} + +/// The channel that receives events when the broadcast futures are dropped. +pub type TaskExecutorRecv = mpsc::UnboundedReceiver<()>; + +/// The state of the `TaskExecutorBroadcast`. +pub struct TaskExecutorState { + pub recv: TaskExecutorRecv, + pub num_tasks: Arc, +} + +impl TaskExecutorState { + pub fn num_tasks(&self) -> usize { + self.num_tasks.load(std::sync::atomic::Ordering::Acquire) + } +} + +impl TaskExecutorBroadcast { + /// Construct a new `TaskExecutorBroadcast` and a receiver to know when the broadcast futures + /// are dropped. + pub fn new() -> (Self, TaskExecutorState) { + let (sender, recv) = mpsc::unbounded_channel(); + let num_tasks = Arc::new(AtomicUsize::new(0)); + + ( + Self { executor: TaskExecutor::new(), sender, num_tasks: num_tasks.clone() }, + TaskExecutorState { recv, num_tasks }, + ) + } +} + +impl SpawnNamed for TaskExecutorBroadcast { + fn spawn( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn(name, group, future) + } + + fn spawn_blocking( + &self, + name: &'static str, + group: Option<&'static str>, + future: futures::future::BoxFuture<'static, ()>, + ) { + let sender = self.sender.clone(); + let num_tasks = self.num_tasks.clone(); + + let future = Box::pin(async move { + num_tasks.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + future.await; + num_tasks.fetch_sub(1, std::sync::atomic::Ordering::AcqRel); + + let _ = sender.send(()); + }); + + self.executor.spawn_blocking(name, group, future) + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs new file mode 100644 index 00000000000..aa8ac572dec --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
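//! Illustrative note (not part of this patch; `basic_pool` is a placeholder binding): the
//! middleware below wraps a `BasicPool` and mirrors every `submit_and_watch` status into an
//! unbounded channel, so a test can assert on the exact sequence of pool events, roughly:
//! ```ignore
//! let (pool, mut events) = MiddlewarePool::new(Arc::new(basic_pool));
//! // ... broadcast a transaction through the RPC under test ...
//! match events.recv().await.unwrap() {
//!     MiddlewarePoolEvent::TransactionStatus { transaction, status } => { /* assert on status */ },
//!     MiddlewarePoolEvent::PoolError { transaction, err } => panic!("{transaction}: {err}"),
//! }
//! ```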
+ +use codec::Encode; +use futures::Future; +use sc_transaction_pool::BasicPool; +use sc_transaction_pool_api::{ + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, +}; + +use crate::hex_string; +use futures::{FutureExt, StreamExt}; + +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{collections::HashMap, pin::Pin, sync::Arc}; +use substrate_test_runtime_transaction_pool::TestApi; +use tokio::sync::mpsc; + +pub type Block = substrate_test_runtime_client::runtime::Block; + +pub type TxTestPool = MiddlewarePool; +pub type TxStatusType = sc_transaction_pool_api::TransactionStatus< + sc_transaction_pool_api::TxHash, + sc_transaction_pool_api::BlockHash, +>; +pub type TxStatusTypeTest = TxStatusType; + +/// The type of the event that the middleware captures. +#[derive(Debug, PartialEq)] +pub enum MiddlewarePoolEvent { + TransactionStatus { + transaction: String, + status: sc_transaction_pool_api::TransactionStatus< + ::Hash, + ::Hash, + >, + }, + PoolError { + transaction: String, + err: String, + }, +} + +/// The channel that receives events when the broadcast futures are dropped. +pub type MiddlewarePoolRecv = mpsc::UnboundedReceiver; + +/// Add a middleware to the transaction pool. +/// +/// This wraps the `submit_and_watch` to gain access to the events. +pub struct MiddlewarePool { + pub inner_pool: Arc>, + /// Send the middleware events to the test. + sender: mpsc::UnboundedSender, +} + +impl MiddlewarePool { + /// Construct a new [`MiddlewarePool`]. + pub fn new(pool: Arc>) -> (Self, MiddlewarePoolRecv) { + let (sender, recv) = mpsc::unbounded_channel(); + (MiddlewarePool { inner_pool: pool, sender }, recv) + } +} + +impl TransactionPool for MiddlewarePool { + type Block = as TransactionPool>::Block; + type Hash = as TransactionPool>::Hash; + type InPoolTransaction = as TransactionPool>::InPoolTransaction; + type Error = as TransactionPool>::Error; + + fn submit_at( + &self, + at: ::Hash, + source: TransactionSource, + xts: Vec>, + ) -> PoolFuture, Self::Error>>, Self::Error> { + self.inner_pool.submit_at(at, source, xts) + } + + fn submit_one( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture, Self::Error> { + self.inner_pool.submit_one(at, source, xt) + } + + fn submit_and_watch( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture>>, Self::Error> { + let pool = self.inner_pool.clone(); + let sender = self.sender.clone(); + let transaction = hex_string(&xt.encode()); + + async move { + let watcher = match pool.submit_and_watch(at, source, xt).await { + Ok(watcher) => watcher, + Err(err) => { + let _ = sender.send(MiddlewarePoolEvent::PoolError { + transaction: transaction.clone(), + err: err.to_string(), + }); + return Err(err); + }, + }; + + let watcher = watcher.map(move |status| { + let sender = sender.clone(); + let transaction = transaction.clone(); + + let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { + transaction, + status: status.clone(), + }); + + status + }); + + Ok(watcher.boxed()) + } + .boxed() + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.inner_pool.remove_invalid(hashes) + } + + fn status(&self) -> PoolStatus { + self.inner_pool.status() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.inner_pool.import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + 
self.inner_pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.inner_pool.on_broadcasted(propagations) + } + + fn ready_transaction(&self, hash: &TxHash) -> Option> { + self.inner_pool.ready_transaction(hash) + } + + fn ready_at( + &self, + at: NumberFor, + ) -> Pin< + Box< + dyn Future< + Output = Box> + Send>, + > + Send, + >, + > { + self.inner_pool.ready_at(at) + } + + fn ready(&self) -> Box> + Send> { + self.inner_pool.ready() + } + + fn futures(&self) -> Vec { + self.inner_pool.futures() + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs new file mode 100644 index 00000000000..ab0caaf906f --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/mod.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +mod executor; +mod middleware_pool; +#[macro_use] +mod setup; + +mod transaction_broadcast_tests; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs new file mode 100644 index 00000000000..04ee7b9b4c9 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{ + chain_head::test_utils::ChainHeadMockClient, + transaction::{ + api::TransactionBroadcastApiServer, + tests::executor::{TaskExecutorBroadcast, TaskExecutorState}, + TransactionBroadcast as RpcTransactionBroadcast, + }, +}; +use futures::Future; +use jsonrpsee::RpcModule; +use sc_transaction_pool::*; +use std::{pin::Pin, sync::Arc}; +use substrate_test_runtime_client::{prelude::*, Client}; +use substrate_test_runtime_transaction_pool::TestApi; + +use crate::transaction::tests::middleware_pool::{MiddlewarePool, MiddlewarePoolRecv}; + +pub type Block = substrate_test_runtime_client::runtime::Block; + +/// Initial Alice account nonce. 
+pub const ALICE_NONCE: u64 = 209; + +fn create_basic_pool_with_genesis( + test_api: Arc, + options: Options, +) -> (BasicPool, Pin + Send>>) { + let genesis_hash = { + test_api + .chain() + .read() + .block_by_number + .get(&0) + .map(|blocks| blocks[0].0.header.hash()) + .expect("there is block 0. qed") + }; + BasicPool::new_test(test_api, genesis_hash, genesis_hash, options) +} + +fn maintained_pool( + options: Options, +) -> (BasicPool, Arc, futures::executor::ThreadPool) { + let api = Arc::new(TestApi::with_alice_nonce(ALICE_NONCE)); + let (pool, background_task) = create_basic_pool_with_genesis(api.clone(), options); + + let thread_pool = futures::executor::ThreadPool::new().unwrap(); + thread_pool.spawn_ok(background_task); + (pool, api, thread_pool) +} + +pub fn setup_api( + options: Options, +) -> ( + Arc, + Arc, + Arc>>, + RpcModule>>>, + TaskExecutorState, + MiddlewarePoolRecv, +) { + let (pool, api, _) = maintained_pool(options); + let (pool, pool_state) = MiddlewarePool::new(Arc::new(pool).clone()); + let pool = Arc::new(pool); + + let builder = TestClientBuilder::new(); + let client = Arc::new(builder.build()); + let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); + + let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); + + let tx_api = + RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) + .into_rpc(); + + (api, pool, client_mock, tx_api, executor_recv, pool_state) +} + +/// Get the next event from the provided middleware in at most 5 seconds. +macro_rules! get_next_event { + ($middleware:expr) => { + tokio::time::timeout(std::time::Duration::from_secs(5), $middleware.recv()) + .await + .unwrap() + .unwrap() + }; +} + +/// Collect the next number of transaction events from the provided middleware. +macro_rules! get_next_tx_events { + ($middleware:expr, $num:expr) => {{ + let mut events = std::collections::HashMap::new(); + for _ in 0..$num { + let event = get_next_event!($middleware); + match event { + crate::transaction::tests::middleware_pool::MiddlewarePoolEvent::TransactionStatus { transaction, status } => { + events.entry(transaction).or_insert_with(|| vec![]).push(status); + }, + other => panic!("Expected TransactionStatus, received {:?}", other), + }; + } + events + }}; +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs new file mode 100644 index 00000000000..690a1a64d74 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -0,0 +1,523 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
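//! Illustrative note (not part of this patch): the tests below share one choreography, built from
//! the helpers in `setup.rs` and `middleware_pool.rs`:
//! ```ignore
//! let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) =
//!     setup_api(Default::default());
//! // 1. call `transaction_unstable_broadcast` over RPC,
//! // 2. trigger a block import so the broadcast future submits the tx,
//! // 3. assert on the mirrored pool events and on `exec_middleware.num_tasks()`,
//! // 4. maintain the pool to drive the tx through Ready/InBlock/Finalized/Invalid.
//! ```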
+ +use crate::{hex_string, transaction::error::json_rpc_spec}; +use assert_matches::assert_matches; +use codec::Encode; +use jsonrpsee::{rpc_params, MethodsError as Error}; +use sc_transaction_pool::{Options, PoolLimit}; +use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; +use std::sync::Arc; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +// Test helpers. +use crate::transaction::tests::{ + middleware_pool::{MiddlewarePoolEvent, TxStatusTypeTest}, + setup::{setup_api, ALICE_NONCE}, +}; + +#[tokio::test] +async fn tx_broadcast_enters_pool() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + + assert_eq!(1, pool.inner_pool.status().ready); + assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes); + + // Import block 2 with the transaction included. + let block_2_header = api.push_block(2, vec![uxt.clone()], true); + let block_2 = block_2_header.hash(); + + // Announce block 2 to the pool. + let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::InBlock((block_2, 0)) + } + ); + + // The future broadcast awaits for the finalized status to be reached. + // Force the future to exit by calling stop. + let _: () = tx_api + .call("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap(); + + // Ensure the broadcast future finishes. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); +} + +#[tokio::test] +async fn tx_broadcast_invalid_tx() { + let (_, pool, _, tx_api, mut exec_middleware, _) = setup_api(Default::default()); + + // Invalid parameters. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + assert_eq!(0, pool.status().ready); + + // Invalid transaction that cannot be decoded. The broadcast silently exits. + let xt = "0xdeadbeef"; + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + assert_eq!(0, pool.status().ready); + + // Await the broadcast future to exit. + // Without this we'd be subject to races, where we try to call the stop before the tx is + // dropped. 
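    // Note: `exec_middleware.recv` yields one event per broadcast task that finishes (see
    // `TaskExecutorBroadcast`), so `num_tasks()` reaching 0 below confirms the broadcast future
    // has really been dropped.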
+ let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); + + // The broadcast future was dropped, and the operation is no longer active. + // When the operation is not active, either from the tx being finalized or a + // terminal error; the stop method should return an error. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" + ); +} + +#[tokio::test] +async fn tx_stop_with_invalid_operation_id() { + let (_, _, _, tx_api, _, _) = setup_api(Default::default()); + + // Make an invalid stop call. + let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" + ); +} + +#[tokio::test] +async fn tx_broadcast_resubmits_future_nonce_tx() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + let block_1 = block_1_header.hash(); + + let current_uxt = uxt(Alice, ALICE_NONCE); + let current_xt = hex_string(¤t_uxt.encode()); + // This lives in the future. + let future_uxt = uxt(Alice, ALICE_NONCE + 1); + let future_xt = hex_string(&future_uxt.encode()); + + let future_operation_id: String = tx_api + .call("transaction_unstable_broadcast", rpc_params![&future_xt]) + .await + .unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: future_xt.clone(), + status: TxStatusTypeTest::Future + } + ); + + let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + // Ensure the tx is in the future. + assert_eq!(1, pool.inner_pool.status().future); + + let block_2_header = api.push_block(2, vec![], true); + let block_2 = block_2_header.hash(); + + let operation_id: String = tx_api + .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) + .await + .unwrap(); + assert_ne!(future_operation_id, operation_id); + + // Announce block 2 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_2_header).await; + + // Collect the events of both transactions. + let events = get_next_tx_events!(&mut pool_middleware, 2); + // Transactions entered the ready queue. + assert_eq!(events.get(¤t_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); + assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); + + let event = ChainEvent::NewBestBlock { hash: block_2, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(2, pool.inner_pool.status().ready); + assert_eq!(0, pool.inner_pool.status().future); + + // Finalize transactions. 
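    // Note: including both extrinsics in block 3 and announcing it as `Finalized` drives each
    // watcher through `InBlock` and then `Finalized`, which is what allows the two broadcast
    // futures to complete further down.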
+ let block_3_header = api.push_block(3, vec![current_uxt, future_uxt], true); + let block_3 = block_3_header.hash(); + client_mock.trigger_import_stream(block_3_header).await; + + let event = ChainEvent::Finalized { hash: block_3, tree_route: Arc::from(vec![]) }; + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + assert_eq!(0, pool.inner_pool.status().future); + + let events = get_next_tx_events!(&mut pool_middleware, 4); + assert_eq!( + events.get(¤t_xt).unwrap(), + &vec![TxStatusTypeTest::InBlock((block_3, 0)), TxStatusTypeTest::Finalized((block_3, 0))] + ); + assert_eq!( + events.get(&future_xt).unwrap(), + &vec![TxStatusTypeTest::InBlock((block_3, 1)), TxStatusTypeTest::Finalized((block_3, 1))] + ); + + // Both broadcast futures must exit. + let _ = get_next_event!(&mut exec_middleware.recv); + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); +} + +/// This test is similar to `tx_broadcast_enters_pool` +/// However the last block is announced as finalized to force the +/// broadcast future to exit before the `stop` is called. +#[tokio::test] +async fn tx_broadcast_stop_after_broadcast_finishes() { + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default()); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction + // pool.inner_pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + + assert_eq!(1, pool.inner_pool.status().ready); + assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes); + + // Import block 2 with the transaction included. + let block_2_header = api.push_block(2, vec![uxt.clone()], true); + let block_2 = block_2_header.hash(); + + // Announce block 2 to the pool.inner_pool. + let event = ChainEvent::Finalized { hash: block_2, tree_route: Arc::from(vec![]) }; + pool.inner_pool.maintain(event).await; + + assert_eq!(0, pool.inner_pool.status().ready); + + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::InBlock((block_2, 0)) + } + ); + + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Finalized((block_2, 0)) + } + ); + + // Ensure the broadcast future terminated properly. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); + + // The operation ID is no longer valid, check that the broadcast future + // cleared out the inner state of the operation. 
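    // Note: contrast this with `tx_broadcast_enters_pool`, where the same `stop` call succeeds
    // because the broadcast future is still alive, waiting for finalization.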
+ let err = tx_api + .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(err) if err.code() == json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid operation id" + ); +} + +#[tokio::test] +async fn tx_broadcast_resubmits_invalid_tx() { + let limits = PoolLimit { count: 8192, total_bytes: 20 * 1024 * 1024 }; + let options = Options { + ready: limits.clone(), + future: limits, + reject_future_transactions: false, + // This ensures that a transaction is not banned. + ban_time: std::time::Duration::ZERO, + }; + + let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(options); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + let _operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + let block_1_header = api.push_block(1, vec![], true); + let block_1 = block_1_header.hash(); + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready, + } + ); + assert_eq!(1, pool.inner_pool.status().ready); + assert_eq!(uxt.encode().len(), pool.inner_pool.status().ready_bytes); + + // Mark the transaction as invalid from the API, causing a temporary ban. + api.add_invalid(&uxt); + + // Push an event to the pool to ensure the transaction is excluded. + let event = ChainEvent::NewBestBlock { hash: block_1, tree_route: None }; + pool.inner_pool.maintain(event).await; + assert_eq!(1, pool.inner_pool.status().ready); + + // Ensure the `transaction_unstable_broadcast` is aware of the invalid transaction. + let event = get_next_event!(&mut pool_middleware); + // Because we have received an `Invalid` status, we try to broadcast the transaction with the + // next announced block. + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Invalid + } + ); + + // Import block 2. + let block_2_header = api.push_block(2, vec![], true); + client_mock.trigger_import_stream(block_2_header).await; + + // Ensure we propagate the temporary ban error to `submit_and_watch`. + // This ensures we'll loop again with the next annmounced block and try to resubmit the + // transaction. The transaction remains temporarily banned until the pool is maintained. + let event = get_next_event!(&mut pool_middleware); + assert_matches!(event, MiddlewarePoolEvent::PoolError { transaction, err } if transaction == xt && err.contains("Transaction temporarily Banned")); + + // Import block 3. + let block_3_header = api.push_block(3, vec![], true); + let block_3 = block_3_header.hash(); + // Remove the invalid transaction from the pool to allow it to pass through. + api.remove_invalid(&uxt); + let event = ChainEvent::NewBestBlock { hash: block_3, tree_route: None }; + // We have to maintain the pool to ensure the transaction is no longer invalid. + // This clears out the banned transactions. + pool.inner_pool.maintain(event).await; + assert_eq!(0, pool.inner_pool.status().ready); + + // Announce block to `transaction_unstable_broadcast`. 
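+	// With the ban cleared by the pool maintenance above, the broadcast future is
+	// expected to resubmit the transaction, and it should become ready again.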
+	client_mock.trigger_import_stream(block_3_header).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Ready,
+		}
+	);
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	let block_4_header = api.push_block(4, vec![uxt], true);
+	let block_4 = block_4_header.hash();
+	let event = ChainEvent::Finalized { hash: block_4, tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::InBlock((block_4, 0)),
+		}
+	);
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: xt.clone(),
+			status: TxStatusTypeTest::Finalized((block_4, 0)),
+		}
+	);
+
+	// Ensure the broadcast future terminated properly.
+	let _ = get_next_event!(&mut exec_middleware.recv);
+	assert_eq!(0, exec_middleware.num_tasks());
+}
+
+/// This is similar to `tx_broadcast_resubmits_invalid_tx`.
+/// However, it forces the tx to be resubmitted because of the pool
+/// limits, which is a different code path than the invalid tx.
+#[tokio::test]
+async fn tx_broadcast_resubmits_dropped_tx() {
+	let limits = PoolLimit { count: 1, total_bytes: 1000 };
+	let options = Options {
+		ready: limits.clone(),
+		future: limits,
+		reject_future_transactions: false,
+		// This ensures that a transaction is not banned.
+		ban_time: std::time::Duration::ZERO,
+	};
+
+	let (api, pool, client_mock, tx_api, _, mut pool_middleware) = setup_api(options);
+
+	let current_uxt = uxt(Alice, ALICE_NONCE);
+	let current_xt = hex_string(&current_uxt.encode());
+	// This lives in the future.
+	let future_uxt = uxt(Alice, ALICE_NONCE + 1);
+	let future_xt = hex_string(&future_uxt.encode());
+
+	// By default the `validate_transaction` mock uses priority 1 for
+	// transactions. Bump the priority to ensure other transactions
+	// are immediately dropped.
+	api.set_priority(&current_uxt, 10);
+
+	let current_operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&current_xt])
+		.await
+		.unwrap();
+
+	// Announce block 1 to `transaction_unstable_broadcast`.
+	let block_1_header = api.push_block(1, vec![], true);
+	let event =
+		ChainEvent::Finalized { hash: block_1_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_1_header).await;
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::TransactionStatus {
+			transaction: current_xt.clone(),
+			status: TxStatusTypeTest::Ready,
+		}
+	);
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	// The future tx has priority 2, smaller than the current 10.
+	api.set_priority(&future_uxt, 2);
+	let future_operation_id: String = tx_api
+		.call("transaction_unstable_broadcast", rpc_params![&future_xt])
+		.await
+		.unwrap();
+	assert_ne!(current_operation_id, future_operation_id);
+
+	let block_2_header = api.push_block(2, vec![], true);
+	let event =
+		ChainEvent::Finalized { hash: block_2_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_2_header).await;
+
+	// We must have at most 1 transaction in the pool, as per limits above.
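+	// The lower-priority (future) transaction is reported as dropped via a
+	// `PoolError`; the broadcast future is expected to resubmit it once the
+	// first transaction is finalized.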
+	assert_eq!(1, pool.inner_pool.status().ready);
+
+	let event = get_next_event!(&mut pool_middleware);
+	assert_eq!(
+		event,
+		MiddlewarePoolEvent::PoolError {
+			transaction: future_xt.clone(),
+			err: "Transaction couldn't enter the pool because of the limit".into()
+		}
+	);
+
+	let block_3_header = api.push_block(3, vec![current_uxt], true);
+	let event =
+		ChainEvent::Finalized { hash: block_3_header.hash(), tree_route: Arc::from(vec![]) };
+	pool.inner_pool.maintain(event).await;
+	client_mock.trigger_import_stream(block_3_header.clone()).await;
+
+	// The first tx is in a finalized block; the future tx must enter the pool.
+	let events = get_next_tx_events!(&mut pool_middleware, 3);
+	assert_eq!(
+		events.get(&current_xt).unwrap(),
+		&vec![
+			TxStatusTypeTest::InBlock((block_3_header.hash(), 0)),
+			TxStatusTypeTest::Finalized((block_3_header.hash(), 0))
+		]
+	);
+	// The dropped transaction was resubmitted.
+	assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]);
+}
diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs
index faa3f455a58..64b301e6bf3 100644
--- a/substrate/client/transaction-pool/src/lib.rs
+++ b/substrate/client/transaction-pool/src/lib.rs
@@ -164,8 +164,9 @@ where
 		pool_api: Arc,
 		best_block_hash: Block::Hash,
 		finalized_hash: Block::Hash,
+		options: graph::Options,
 	) -> (Self, Pin + Send>>) {
-		let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone()));
+		let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone()));
 		let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(
 			pool_api.clone(),
 			pool.clone(),
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index 6b1a197440c..461b9860d41 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -73,7 +73,7 @@ fn create_basic_pool_with_genesis(
 			.map(|blocks| blocks[0].0.header.hash())
 			.expect("there is block 0. qed")
 	};
-	BasicPool::new_test(test_api, genesis_hash, genesis_hash)
+	BasicPool::new_test(test_api, genesis_hash, genesis_hash, Default::default())
 }
 
 fn create_basic_pool(test_api: TestApi) -> BasicPool {
@@ -994,6 +994,7 @@ fn import_notification_to_pool_maintain_works() {
 		)),
 		best_hash,
 		finalized_hash,
+		Default::default(),
 	)
 	.0,
 	);
diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
index 8c8345b06bd..5202e6e6515 100644
--- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs
+++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
@@ -81,6 +81,7 @@ pub struct ChainState {
 	pub block_by_hash: HashMap,
 	pub nonces: HashMap,
 	pub invalid_hashes: HashSet,
+	pub priorities: HashMap,
 }
 
 /// Test Api for transaction pool.
@@ -214,6 +215,22 @@ impl TestApi {
 		self.chain.write().invalid_hashes.insert(Self::hash_and_length_inner(xts).0);
 	}
 
+	/// Remove a transaction that was previously declared as invalid via `[Self::add_invalid]`.
+	///
+	/// The next time the transaction pool tries to validate this
+	/// extrinsic, the api will succeed.
+	pub fn remove_invalid(&self, xts: &Extrinsic) {
+		self.chain.write().invalid_hashes.remove(&Self::hash_and_length_inner(xts).0);
+	}
+
+	/// Set a transaction priority.
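+	///
+	/// The priority is returned by the mocked `validate_transaction` for this
+	/// extrinsic and defaults to 1 when not set, letting tests control which
+	/// transaction gets dropped when the pool limits are reached.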
+ pub fn set_priority(&self, xts: &Extrinsic, priority: u64) { + self.chain + .write() + .priorities + .insert(Self::hash_and_length_inner(xts).0, priority); + } + /// Query validation requests received. pub fn validation_requests(&self) -> Vec { self.validation_requests.read().clone() @@ -300,8 +317,14 @@ impl ChainApi for TestApi { return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) } - let mut validity = - ValidTransaction { priority: 1, requires, provides, longevity: 64, propagate: true }; + let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned(); + let mut validity = ValidTransaction { + priority: priority.unwrap_or(1), + requires, + provides, + longevity: 64, + propagate: true, + }; (self.valid_modifier.read())(&mut validity); -- GitLab From f4eedcebafa4d2addb8b71ba31ab1bc579ba3c4a Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 28 Feb 2024 13:13:27 +0100 Subject: [PATCH 15/86] [prdoc] Optional SemVer bumps and Docs (#3441) Changes: - Add an optional `bump` field to the crates in a prdoc. - Explain the cargo semver interpretation for <1 versions in the release doc. --------- Signed-off-by: Oliver Tale-Yazdi --- docs/RELEASE.md | 12 +++++++++--- prdoc/schema_user.json | 33 +++++++++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/docs/RELEASE.md b/docs/RELEASE.md index 6d681d78f36..e73be2779a9 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -18,10 +18,16 @@ Rococo. To easily refer to a release, it shall be named by its date in the form ## Crate -We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. SemVer requires a -piece of software to first declare a public API. The public API of the Polkadot SDK is hereby declared as the sum of all -crates' public APIs. +We try to follow [SemVer 2.0.0](https://semver.org/) as best as possible for versioning our crates. The definitions of +`major`, `minor` and `patch` version for Rust crates are slightly altered from their standard for pre `1.0.0` versions. +Quoting [rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html): +>Initial development releases starting with β€œ0.y.z” can treat changes in β€œy” as a major release, and β€œz” as a minor +release. β€œ0.0.z” releases are always major changes. This is because Cargo uses the convention that only changes in the +left-most non-zero component are considered incompatible. + +SemVer requires a piece of software to first declare a public API. The public API of the Polkadot SDK +is hereby declared as the sum of all crates' public APIs. 
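+
+As a rough illustration of this rule (this snippet is an example only and uses the `semver`
+crate, which follows the same compatibility convention as Cargo): only changes to the right of
+the leftmost non-zero component are considered compatible.
+
+```rust
+use semver::{Version, VersionReq};
+
+fn main() {
+	// A bare requirement like "0.9.3" is interpreted as ^0.9.3 by Cargo/semver.
+	let req = VersionReq::parse("0.9.3").unwrap();
+	// Bumping `z` of a 0.y.z crate stays compatible...
+	assert!(req.matches(&Version::parse("0.9.4").unwrap()));
+	// ...while bumping `y` is treated as a breaking ("major") change.
+	assert!(!req.matches(&Version::parse("0.10.0").unwrap()));
+
+	// For 0.0.z crates every release is treated as breaking.
+	let req = VersionReq::parse("0.0.5").unwrap();
+	assert!(!req.matches(&Version::parse("0.0.6").unwrap()));
+}
+```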
Inductively, the public API of our library crates is declared as all public items that are neither: - Inside a `__private` module diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 82215d51866..1bd0b3b93ee 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -3,9 +3,8 @@ "$id": "https://raw.githubusercontent.com/paritytech/prdoc/master/prdoc_schema_user.json", "version": { "major": 1, - "minor": 0, - "patch": 0, - "timestamp": 20230817152351 + "minor": 1, + "patch": 0 }, "title": "Polkadot SDK PRDoc Schema", "description": "JSON Schema definition for the Polkadot SDK PR documentation", @@ -125,10 +124,16 @@ "name": { "type": "string" }, + "bump": { + "$ref": "#/$defs/semver_bump" + }, "note": { "type": "string" } - } + }, + "required": [ + "name" + ] }, "migration_db": { "type": "object", @@ -165,6 +170,26 @@ "description" ] }, + "semver_bump": { + "description": "The type of bump to apply to the crate version according to Cargo SemVer: https://doc.rust-lang.org/cargo/reference/semver.html. Please check docs/RELEASE.md for more information.", + "oneOf": [ + { + "const": "major", + "title": "Major", + "description": "A bump to the leftmost non-zero digit of the version number." + }, + { + "const": "minor", + "title": "Minor", + "description": "A bump to the second leftmost non-zero digit of the version number." + }, + { + "const": "patch", + "title": "Patch", + "description": "A bump to the third leftmost non-zero digit of the version number." + } + ] + }, "doc": { "type": "object", "description": "You have the the option to provide different description of your PR for different audiences.", -- GitLab From 14530269b7a76f50dc939bf57b1e3d38270b662b Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 28 Feb 2024 13:07:14 +0000 Subject: [PATCH 16/86] Add documentation around FRAME Offchain workers (#3463) - deprecation companion: https://github.com/substrate-developer-hub/substrate-docs/pull/2136 - inspired by https://substrate.stackexchange.com/questions/11058/how-can-i-create-ocw-that-wont-activates-every-block-but-will-activates-only-w/11060#11060 --------- Co-authored-by: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> --- Cargo.lock | 2 + docs/sdk/Cargo.toml | 5 +- .../reference_docs/frame_offchain_workers.rs | 115 ++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 4 + substrate/frame/support/src/traits/hooks.rs | 2 +- 5 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 docs/sdk/src/reference_docs/frame_offchain_workers.rs diff --git a/Cargo.lock b/Cargo.lock index 026786378d5..ef510986257 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13416,6 +13416,7 @@ dependencies = [ "pallet-collective", "pallet-default-config-example", "pallet-democracy", + "pallet-example-offchain-worker", "pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", @@ -13442,6 +13443,7 @@ dependencies = [ "sp-core", "sp-io", "sp-keyring", + "sp-offchain", "sp-runtime", "sp-version", "staging-chain-spec-builder", diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index b1b60a2d77d..735b3d7df61 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -23,6 +23,7 @@ frame = { path = "../../substrate/frame", features = [ ] } pallet-examples = { path = "../../substrate/frame/examples" } pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } +pallet-example-offchain-worker = { path = "../../substrate/frame/examples/offchain-worker" } # How we build 
docs in rust-docs simple-mermaid = "0.1.1" @@ -38,7 +39,7 @@ frame-support = { path = "../../substrate/frame/support", default-features = fal frame-executive = { path = "../../substrate/frame/executive", default-features = false } pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } -# Substrate +# Substrate Client sc-network = { path = "../../substrate/client/network" } sc-rpc-api = { path = "../../substrate/client/rpc-api" } sc-rpc = { path = "../../substrate/client/rpc" } @@ -50,6 +51,7 @@ sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } + substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } # Cumulus @@ -78,6 +80,7 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-offchain = { path = "../../substrate/primitives/offchain" } sp-version = { path = "../../substrate/primitives/version" } # XCM diff --git a/docs/sdk/src/reference_docs/frame_offchain_workers.rs b/docs/sdk/src/reference_docs/frame_offchain_workers.rs new file mode 100644 index 00000000000..7999707e5ee --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_offchain_workers.rs @@ -0,0 +1,115 @@ +//! # Offchain Workers +//! +//! This reference document explains how offchain workers work in Substrate and FRAME. The main +//! focus is upon FRAME's implementation of this functionality. Nonetheless, offchain workers are a +//! Substrate-provided feature and can be used with possible alternatives to [`frame`] as well. +//! +//! Offchain workers are a commonly misunderstood topic, therefore we explain them bottom-up, +//! starting at the fundamentals and then describing the developer interface. +//! +//! ## Context +//! +//! Recall from [`crate::reference_docs::wasm_meta_protocol`] that the node and the runtime +//! communicate with one another via host functions and runtime APIs. Many of these interactions +//! contribute to the actual state transition of the blockchain. For example [`sp_api::Core`] is the +//! main runtime API that is called to execute new blocks. +//! +//! Offchain workers are in principle not different in any way: It is a runtime API exposed by the +//! wasm blob ([`sp_offchain::OffchainWorkerApi`]), and the node software calls into it when it +//! deems fit. But, crucially, this API call is different in that: +//! +//! 1. It can have no impact on the state ie. it is _OFF (the) CHAIN_. If any state is altered +//! during the execution of this API call, it is discarded. +//! 2. It has access to an extended set of host functions that allow the wasm blob to do more. For +//! example, call into HTTP requests. +//! +//! > The main way through which an offchain worker can interact with the state is by submitting an +//! > extrinsic to the chain. This is the ONLY way to alter the state from an offchain worker. +//! > [`pallet_example_offchain_worker`] provides an example of this. +//! +//! +//! Given the "Off Chain" nature of this API, it is important to remember that calling this API is +//! entirely optional. Some nodes might call into it, some might not, and it would have no impact on +//! 
the execution of your blockchain because no state is altered no matter the execution of the +//! offchain worker API. +//! +//! Substrate's CLI allows some degree of configuration about this, allowing node operators to +//! specify when they want to run the offchain worker API. See +//! [`sc_cli::RunCmd::offchain_worker_params`]. +//! +//! ## Nondeterministic Execution +//! +//! Needless to say, given the above description, the code in your offchain worker API can be +//! nondeterministic, as it is not part of the blockchain's STF, so it can be executed at unknown +//! times, by unknown nodes, and has no impact on the state. This is why an HTTP +//! ([`sp_runtime::offchain::http`]) API is readily provided to the offchain worker APIs. Because +//! there is no need for determinism in this context. +//! +//! > A common mistake here is for novice developers to see this HTTP API, and imagine that +//! > `polkadot-sdk` somehow magically solved the determinism in blockchains, and now a blockchain +//! > can make HTTP calls and it will all work. This is absolutely NOT the case. An HTTP call made +//! > by the offchain worker is non-deterministic by design. Blockchains can't and always won't be +//! > able to perform non-deterministic operations such as making HTTP calls to a foreign server. +//! +//! ## FRAME's API +//! +//! [`frame`] provides a simple API through which pallets can define offchain worker functions. This +//! is part of [`frame::traits::Hooks`], which is implemented as a part of +//! [`frame::pallet_macros::hooks`]. +//! +//! ``` +//! +//! #[frame::pallet] +//! pub mod pallet { +//! use frame::prelude::*; +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::hooks] +//! impl Hooks> for Pallet { +//! fn offchain_worker(block_number: BlockNumberFor) { +//! // ... +//! } +//! } +//! } +//! ``` +//! +//! Additionally, [`sp_runtime::offchain`] provides a set of utilities that can be used to moderate +//! the execution of offchain workers. +//! +//! ## Think Twice: Why Use Substrate's Offchain Workers? +//! +//! Consider the fact that in principle, an offchain worker code written using the above API is no +//! different than an equivalent written with an _actual offchain interaction library_, such as +//! [Polkadot-JS](https://polkadot.js.org/docs/), or any of the other ones listed [here](https://github.com/substrate-developer-hub/awesome-substrate?tab=readme-ov-file#client-libraries). +//! +//! They can both read from the state, and have no means of updating the state, other than the route +//! of submitting an extrinsic to the chain. Therefore, it is worth thinking twice before embedding +//! a logic as a part of Substrate's offchain worker API. Does it have to be there? can it not be a +//! simple, actual offchain application that lives outside of the chain's WASM blob? +//! +//! Some of the reasons why you might want to do the opposite, and actually embed an offchain worker +//! API into the WASM blob are: +//! +//! * Accessing the state is easier within the `offchain_worker` function, as it is already a part +//! of the runtime, and [`frame::pallet_macros::storage`] provides all the tools needed to read +//! the state. Other client libraries might provide varying degrees of capability here. +//! * It will be updated in synchrony with the runtime. A Substrate's offchain application is part +//! of the same WASM blob, and is therefore guaranteed to be up to date. +//! +//! 
For example, imagine you have modified a storage item to have a new type. This will possibly +//! require a [`crate::reference_docs::frame_runtime_upgrades_and_migrations`], and any offchain +//! code, such as a Polkadot-JS application, will have to be updated to reflect this change. Whereas +//! the WASM offchain worker code is guaranteed to already be updated, or else the runtime code will +//! not even compile. +//! +//! +//! ## Further References +//! +//! - +//! - +//! - [Offchain worker example](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/examples/offchain-worker) diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index b2c751420ce..0bd204e4b6a 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -99,3 +99,7 @@ pub mod frame_runtime_upgrades_and_migrations; /// light-node-first out of the box. // TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 pub mod light_nodes; + +/// Learn about the offchain workers, how they function, and how to use them, as provided by the +/// [`frame`] APIs. +pub mod frame_offchain_workers; diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 72f047c3a73..c37fb0f54bc 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -446,7 +446,7 @@ pub trait Hooks { } /// Implementing this function on a pallet allows you to perform long-running tasks that are - /// dispatched as separate threads, and entirely independent of the main wasm runtime. + /// dispatched as separate threads, and entirely independent of the main blockchain execution. /// /// This function can freely read from the state, but any change it makes to the state is /// meaningless. 
Writes can be pushed back to the chain by submitting extrinsics from the -- GitLab From 426136671a42ff759127b4a7a71eac6335744675 Mon Sep 17 00:00:00 2001 From: maksimryndin Date: Wed, 28 Feb 2024 17:29:27 +0100 Subject: [PATCH 17/86] PVF: re-preparing artifact on failed runtime construction (#3187) resolve https://github.com/paritytech/polkadot-sdk/issues/3139 - [x] use a distinguishable error for `execute_artifact` - [x] remove artifact in case of a `RuntimeConstruction` error during the execution - [x] augment the `validate_candidate_with_retry` of `ValidationBackend` with the case of retriable `RuntimeConstruction` error during the execution - [x] update the book (https://paritytech.github.io/polkadot-sdk/book/node/utility/pvf-host-and-workers.html#retrying-execution-requests) - [x] add a test - [x] run zombienet tests --------- Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> --- .../node/core/candidate-validation/src/lib.rs | 53 +++++++----- polkadot/node/core/pvf/common/src/error.rs | 1 + polkadot/node/core/pvf/common/src/execute.rs | 15 ++++ .../core/pvf/common/src/executor_interface.rs | 4 +- .../node/core/pvf/execute-worker/src/lib.rs | 10 ++- polkadot/node/core/pvf/src/artifacts.rs | 8 ++ polkadot/node/core/pvf/src/error.rs | 4 + polkadot/node/core/pvf/src/execute/mod.rs | 2 +- polkadot/node/core/pvf/src/execute/queue.rs | 68 ++++++++++++--- .../core/pvf/src/execute/worker_interface.rs | 8 ++ polkadot/node/core/pvf/src/host.rs | 53 +++++++++++- polkadot/node/core/pvf/tests/it/main.rs | 83 +++++++++++++++++-- polkadot/primitives/src/v6/executor_params.rs | 10 +-- .../src/node/utility/pvf-host-and-workers.md | 8 ++ prdoc/pr_3187.prdoc | 16 ++++ 15 files changed, 294 insertions(+), 49 deletions(-) create mode 100644 prdoc/pr_3187.prdoc diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index bf6e09fd1b6..8237137fdca 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -695,6 +695,8 @@ async fn validate_candidate_exhaustive( ))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( @@ -780,40 +782,50 @@ trait ValidationBackend { return validation_result } + macro_rules! break_if_no_retries_left { + ($counter:ident) => { + if $counter > 0 { + $counter -= 1; + } else { + break + } + }; + } + // Allow limited retries for each kind of error. let mut num_death_retries_left = 1; let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; + let mut num_runtime_construction_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. 
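+			// (i.e. even after waiting `retry_delay` there would be no time left
+			// for another attempt within the execution timeout).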
if total_time_start.elapsed() + retry_delay > exec_timeout { break } - + let mut retry_immediately = false; match validation_result { Err(ValidationError::PossiblyInvalid( PossiblyInvalidError::AmbiguousWorkerDeath | PossiblyInvalidError::AmbiguousJobDeath(_), - )) => - if num_death_retries_left > 0 { - num_death_retries_left -= 1; - } else { - break - }, + )) => break_if_no_retries_left!(num_death_retries_left), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) => - if num_job_error_retries_left > 0 { - num_job_error_retries_left -= 1; - } else { - break - }, + break_if_no_retries_left!(num_job_error_retries_left), Err(ValidationError::Internal(_)) => - if num_internal_retries_left > 0 { - num_internal_retries_left -= 1; - } else { - break - }, + break_if_no_retries_left!(num_internal_retries_left), + + Err(ValidationError::PossiblyInvalid( + PossiblyInvalidError::RuntimeConstruction(_), + )) => { + break_if_no_retries_left!(num_runtime_construction_retries_left); + self.precheck_pvf(pvf.clone()).await?; + // In this case the error is deterministic + // And a retry forces the ValidationBackend + // to re-prepare the artifact so + // there is no need to wait before the retry + retry_immediately = true; + }, Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, } @@ -821,8 +833,11 @@ trait ValidationBackend { // If we got a possibly transient error, retry once after a brief delay, on the // assumption that the conditions that caused this error may have resolved on their own. { - // Wait a brief delay before retrying. - futures_timer::Delay::new(retry_delay).await; + // In case of many transient errors it is necessary to wait a little bit + // for the error to be probably resolved + if !retry_immediately { + futures_timer::Delay::new(retry_delay).await; + } let new_timeout = exec_timeout.saturating_sub(total_time_start.elapsed()); diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index f8faefc24e6..cf274044456 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -16,6 +16,7 @@ use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; use parity_scale_codec::{Decode, Encode}; +pub use sc_executor_common::error::Error as ExecuteError; /// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the /// preparation if successful. diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 6b3becf524d..18c97b03cbc 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -40,6 +40,9 @@ pub enum WorkerResponse { }, /// The candidate is invalid. InvalidCandidate(String), + /// Instantiation of the WASM module instance failed during an execution. + /// Possibly related to local issues or dirty node update. May be retried with re-preparation. + RuntimeConstruction(String), /// The job timed out. JobTimedOut, /// The job process has died. We must kill the worker just in case. @@ -68,6 +71,9 @@ pub enum JobResponse { /// The result of parachain validation. result_descriptor: ValidationResult, }, + /// A possibly transient runtime instantiation error happened during the execution; may be + /// retried with re-preparation + RuntimeConstruction(String), /// The candidate is invalid. 
InvalidCandidate(String), } @@ -81,6 +87,15 @@ impl JobResponse { Self::InvalidCandidate(format!("{}: {}", ctx, msg)) } } + + /// Creates a may retry response from a context `ctx` and a message `msg` (which can be empty). + pub fn runtime_construction(ctx: &'static str, msg: &str) -> Self { + if msg.is_empty() { + Self::RuntimeConstruction(ctx.to_string()) + } else { + Self::RuntimeConstruction(format!("{}: {}", ctx, msg)) + } + } } /// An unexpected error occurred in the execution job process. Because this comes from the job, diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index 4cd2f5c85ee..b69a87890f6 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -16,6 +16,7 @@ //! Interface to the Substrate Executor +use crate::error::ExecuteError; use polkadot_primitives::{ executor_params::{DEFAULT_LOGICAL_STACK_MAX, DEFAULT_NATIVE_STACK_MAX}, ExecutorParam, ExecutorParams, @@ -109,7 +110,7 @@ pub unsafe fn execute_artifact( compiled_artifact_blob: &[u8], executor_params: &ExecutorParams, params: &[u8], -) -> Result, String> { +) -> Result, ExecuteError> { let mut extensions = sp_externalities::Extensions::new(); extensions.register(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion)); @@ -123,7 +124,6 @@ pub unsafe fn execute_artifact( Ok(Ok(ok)) => Ok(ok), Ok(Err(err)) | Err(err) => Err(err), } - .map_err(|err| format!("execute error: {:?}", err)) } /// Constructs the runtime for the given PVF, given the artifact bytes. diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 0cfa5a78694..bd7e76010a6 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,7 +16,9 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. -pub use polkadot_node_core_pvf_common::executor_interface::execute_artifact; +pub use polkadot_node_core_pvf_common::{ + error::ExecuteError, executor_interface::execute_artifact, +}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. @@ -237,7 +239,9 @@ fn validate_using_artifact( // [`executor_interface::prepare`]. execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return JobResponse::format_invalid("execute", &err), + Err(ExecuteError::RuntimeConstruction(wasmerr)) => + return JobResponse::runtime_construction("execute", &wasmerr.to_string()), + Err(err) => return JobResponse::format_invalid("execute", &err.to_string()), Ok(d) => d, }; @@ -550,6 +554,8 @@ fn handle_parent_process( Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) }, Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Ok(JobResponse::RuntimeConstruction(err)) => + Ok(WorkerResponse::RuntimeConstruction(err)), Err(job_error) => { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 78dfe71adad..6288755526d 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -238,6 +238,14 @@ impl Artifacts { .is_none()); } + /// Remove artifact by its id. 
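+	///
+	/// Returns the id together with the path of the compiled artifact if the
+	/// removed entry was in the `Prepared` state.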
+ pub fn remove(&mut self, artifact_id: ArtifactId) -> Option<(ArtifactId, PathBuf)> { + self.inner.remove(&artifact_id).and_then(|state| match state { + ArtifactState::Prepared { path, .. } => Some((artifact_id, path)), + _ => None, + }) + } + /// Remove artifacts older than the given TTL and return id and path of the removed ones. pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { let now = SystemTime::now(); diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 80d41d5c64b..8dc96305ead 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -86,6 +86,10 @@ pub enum PossiblyInvalidError { /// vote invalid. #[error("possibly invalid: job error: {0}")] JobError(String), + /// Instantiation of the WASM module instance failed during an execution. + /// Possibly related to local issues or dirty node update. May be retried with re-preparation. + #[error("possibly invalid: runtime construction: {0}")] + RuntimeConstruction(String), } impl From for ValidationError { diff --git a/polkadot/node/core/pvf/src/execute/mod.rs b/polkadot/node/core/pvf/src/execute/mod.rs index c6d9cf90fa2..365e98196ca 100644 --- a/polkadot/node/core/pvf/src/execute/mod.rs +++ b/polkadot/node/core/pvf/src/execute/mod.rs @@ -23,4 +23,4 @@ mod queue; mod worker_interface; -pub use queue::{start, PendingExecutionRequest, ToQueue}; +pub use queue::{start, FromQueue, PendingExecutionRequest, ToQueue}; diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aa91d11781f..bdc3c7327b0 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -25,7 +25,7 @@ use crate::{ InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ - channel::mpsc, + channel::{mpsc, oneshot}, future::BoxFuture, stream::{FuturesUnordered, StreamExt as _}, Future, FutureExt, @@ -54,6 +54,12 @@ pub enum ToQueue { Enqueue { artifact: ArtifactPathId, pending_execution_request: PendingExecutionRequest }, } +/// A response from queue. +#[derive(Debug)] +pub enum FromQueue { + RemoveArtifact { artifact: ArtifactId, reply_to: oneshot::Sender<()> }, +} + /// An execution request that should execute the PVF (known in the context) and send the results /// to the given result sender. #[derive(Debug)] @@ -137,6 +143,8 @@ struct Queue { /// The receiver that receives messages to the pool. to_queue_rx: mpsc::Receiver, + /// The sender to send messages back to validation host. + from_queue_tx: mpsc::UnboundedSender, // Some variables related to the current session. program_path: PathBuf, @@ -161,6 +169,7 @@ impl Queue { node_version: Option, security_status: SecurityStatus, to_queue_rx: mpsc::Receiver, + from_queue_tx: mpsc::UnboundedSender, ) -> Self { Self { metrics, @@ -170,6 +179,7 @@ impl Queue { node_version, security_status, to_queue_rx, + from_queue_tx, queue: VecDeque::new(), mux: Mux::new(), workers: Workers { @@ -301,7 +311,7 @@ async fn handle_mux(queue: &mut Queue, event: QueueEvent) { handle_worker_spawned(queue, idle, handle, job); }, QueueEvent::StartWork(worker, outcome, artifact_id, result_tx) => { - handle_job_finish(queue, worker, outcome, artifact_id, result_tx); + handle_job_finish(queue, worker, outcome, artifact_id, result_tx).await; }, } } @@ -327,42 +337,69 @@ fn handle_worker_spawned( /// If there are pending jobs in the queue, schedules the next of them onto the just freed up /// worker. 
Otherwise, puts back into the available workers list. -fn handle_job_finish( +async fn handle_job_finish( queue: &mut Queue, worker: Worker, outcome: Outcome, artifact_id: ArtifactId, result_tx: ResultSender, ) { - let (idle_worker, result, duration) = match outcome { + let (idle_worker, result, duration, sync_channel) = match outcome { Outcome::Ok { result_descriptor, duration, idle_worker } => { // TODO: propagate the soft timeout - (Some(idle_worker), Ok(result_descriptor), Some(duration)) + (Some(idle_worker), Ok(result_descriptor), Some(duration), None) }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, + None, ), - Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None), + Outcome::RuntimeConstruction { err, idle_worker } => { + // The task for artifact removal is executed concurrently with + // the message to the host on the execution result. + let (result_tx, result_rx) = oneshot::channel(); + queue + .from_queue_tx + .unbounded_send(FromQueue::RemoveArtifact { + artifact: artifact_id.clone(), + reply_to: result_tx, + }) + .expect("from execute queue receiver is listened by the host; qed"); + ( + Some(idle_worker), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction( + err, + ))), + None, + Some(result_rx), + ) + }, + Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None, None), // Either the worker or the job timed out. Kill the worker in either case. Treated as // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => - (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None), // "Maybe invalid" errors (will retry). Outcome::WorkerIntfErr => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, + None, ), Outcome::JobDied { err } => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), None, + None, + ), + Outcome::JobError { err } => ( + None, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), + None, + None, ), - Outcome::JobError { err } => - (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None), }; queue.metrics.execute_finished(); @@ -386,6 +423,12 @@ fn handle_job_finish( ); } + if let Some(sync_channel) = sync_channel { + // err means the sender is dropped (the artifact is already removed from the cache) + // so that's legitimate to ignore the result + let _ = sync_channel.await; + } + // First we send the result. It may fail due to the other end of the channel being dropped, // that's legitimate and we don't treat that as an error. 
let _ = result_tx.send(result); @@ -521,8 +564,10 @@ pub fn start( spawn_timeout: Duration, node_version: Option, security_status: SecurityStatus, -) -> (mpsc::Sender, impl Future) { +) -> (mpsc::Sender, mpsc::UnboundedReceiver, impl Future) { let (to_queue_tx, to_queue_rx) = mpsc::channel(20); + let (from_queue_tx, from_queue_rx) = mpsc::unbounded(); + let run = Queue::new( metrics, program_path, @@ -532,7 +577,8 @@ pub fn start( node_version, security_status, to_queue_rx, + from_queue_tx, ) .run(); - (to_queue_tx, run) + (to_queue_tx, from_queue_rx, run) } diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs index 9f7738f00e6..db81da118d7 100644 --- a/polkadot/node/core/pvf/src/execute/worker_interface.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -87,6 +87,10 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The error is probably transient. It may be for example + /// because the artifact was prepared with a Wasmtime version different from the version + /// in the current execution environment. + RuntimeConstruction { err: String, idle_worker: IdleWorker }, /// The execution time exceeded the hard limit. The worker is terminated. HardTimeout, /// An I/O error happened during communication with the worker. This may mean that the worker @@ -193,6 +197,10 @@ pub async fn start_work( err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, + WorkerResponse::RuntimeConstruction(err) => Outcome::RuntimeConstruction { + err, + idle_worker: IdleWorker { stream, pid, worker_dir }, + }, WorkerResponse::JobTimedOut => Outcome::HardTimeout, WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, WorkerResponse::JobError(err) => Outcome::JobError { err }, diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index ae9fdc7d2de..8ec46f4b08f 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -274,7 +274,7 @@ pub async fn start( from_prepare_pool, ); - let (to_execute_queue_tx, run_execute_queue) = execute::start( + let (to_execute_queue_tx, from_execute_queue_rx, run_execute_queue) = execute::start( metrics, config.execute_worker_program_path.to_owned(), config.cache_path.clone(), @@ -296,6 +296,7 @@ pub async fn start( to_prepare_queue_tx, from_prepare_queue_rx, to_execute_queue_tx, + from_execute_queue_rx, to_sweeper_tx, awaiting_prepare: AwaitingPrepare::default(), }) @@ -342,6 +343,8 @@ struct Inner { from_prepare_queue_rx: mpsc::UnboundedReceiver, to_execute_queue_tx: mpsc::Sender, + from_execute_queue_rx: mpsc::UnboundedReceiver, + to_sweeper_tx: mpsc::Sender, awaiting_prepare: AwaitingPrepare, @@ -358,6 +361,7 @@ async fn run( to_host_rx, from_prepare_queue_rx, mut to_prepare_queue_tx, + from_execute_queue_rx, mut to_execute_queue_tx, mut to_sweeper_tx, mut awaiting_prepare, @@ -384,10 +388,21 @@ async fn run( let mut to_host_rx = to_host_rx.fuse(); let mut from_prepare_queue_rx = from_prepare_queue_rx.fuse(); + let mut from_execute_queue_rx = from_execute_queue_rx.fuse(); loop { // biased to make it behave deterministically for tests. futures::select_biased! 
{ + from_execute_queue_rx = from_execute_queue_rx.next() => { + let from_queue = break_if_fatal!(from_execute_queue_rx.ok_or(Fatal)); + let execute::FromQueue::RemoveArtifact { artifact, reply_to } = from_queue; + break_if_fatal!(handle_artifact_removal( + &mut to_sweeper_tx, + &mut artifacts, + artifact, + reply_to, + ).await); + }, () = cleanup_pulse.select_next_some() => { // `select_next_some` because we don't expect this to fail, but if it does, we // still don't fail. The trade-off is that the compiled cache will start growing @@ -861,6 +876,37 @@ async fn handle_cleanup_pulse( Ok(()) } +async fn handle_artifact_removal( + sweeper_tx: &mut mpsc::Sender, + artifacts: &mut Artifacts, + artifact_id: ArtifactId, + reply_to: oneshot::Sender<()>, +) -> Result<(), Fatal> { + let (artifact_id, path) = if let Some(artifact) = artifacts.remove(artifact_id) { + artifact + } else { + // if we haven't found the artifact by its id, + // it has been probably removed + // anyway with the randomness of the artifact name + // it is safe to ignore + return Ok(()); + }; + reply_to + .send(()) + .expect("the execute queue waits for the artifact remove confirmation; qed"); + // Thanks to the randomness of the artifact name (see + // `artifacts::generate_artifact_path`) there is no issue with any name conflict on + // future repreparation. + // So we can confirm the artifact removal already + gum::debug!( + target: LOG_TARGET, + validation_code_hash = ?artifact_id.code_hash, + "PVF pruning: pruning artifact by request from the execute queue", + ); + sweeper_tx.send(path).await.map_err(|_| Fatal)?; + Ok(()) +} + /// A simple task which sole purpose is to delete files thrown at it. async fn sweeper_task(mut sweeper_rx: mpsc::Receiver) { loop { @@ -968,6 +1014,8 @@ pub(crate) mod tests { to_prepare_queue_rx: mpsc::Receiver, from_prepare_queue_tx: mpsc::UnboundedSender, to_execute_queue_rx: mpsc::Receiver, + #[allow(unused)] + from_execute_queue_tx: mpsc::UnboundedSender, to_sweeper_rx: mpsc::Receiver, run: BoxFuture<'static, ()>, @@ -979,6 +1027,7 @@ pub(crate) mod tests { let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); let (to_execute_queue_tx, to_execute_queue_rx) = mpsc::channel(10); + let (from_execute_queue_tx, from_execute_queue_rx) = mpsc::unbounded(); let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10); let run = run(Inner { @@ -989,6 +1038,7 @@ pub(crate) mod tests { to_prepare_queue_tx, from_prepare_queue_rx, to_execute_queue_tx, + from_execute_queue_rx, to_sweeper_tx, awaiting_prepare: AwaitingPrepare::default(), }) @@ -999,6 +1049,7 @@ pub(crate) mod tests { to_prepare_queue_rx, from_prepare_queue_tx, to_execute_queue_rx, + from_execute_queue_tx, to_sweeper_rx, run, } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index bcc10749e74..cdfbcd8e578 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -21,13 +21,14 @@ use parity_scale_codec::Encode as _; #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ - start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, - PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, + PossiblyInvalidError, PrepareError, PrepareJobKind, 
PvfPrepData, ValidationError, + ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; -use std::time::Duration; +use std::{io::Write, time::Duration}; use tokio::sync::Mutex; mod adder; @@ -352,10 +353,80 @@ async fn deleting_prepared_artifact_does_not_dispute() { ) .await; - match result { - Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, - r => panic!("{:?}", r), + assert_matches!(result, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout))); +} + +// Test that corruption of a prepared artifact does not lead to a dispute when we try to execute it. +#[tokio::test] +async fn corrupted_prepared_artifact_does_not_dispute() { + let host = TestHost::new().await; + let cache_dir = host.cache_dir.path(); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + + // Manually corrupting the prepared artifact from disk. The in-memory artifacts table won't + // change. + let artifact_path = { + // Get the artifact path (asserting it exists). + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + // Should contain the artifact and the worker dir. + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + + // Corrupt the artifact. + let mut f = std::fs::OpenOptions::new() + .write(true) + .truncate(true) + .open(artifact_path.path()) + .unwrap(); + f.write_all(b"corrupted wasm").unwrap(); + f.flush().unwrap(); + artifact_path + }; + + assert!(artifact_path.path().exists()); + + // Try to validate, artifact should get removed because of the corruption. + let result = host + .validate_candidate( + halt::wasm_binary_unwrap(), + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ) + .await; + + assert_matches!( + result, + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::RuntimeConstruction(_))) + ); + + // because of RuntimeConstruction we may retry + host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); + + // The actual artifact removal is done concurrently + // with sending of the result of the execution + // it is not a problem for further re-preparation as + // artifact filenames are random + for _ in 1..5 { + if !artifact_path.path().exists() { + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; } + + assert!( + !artifact_path.path().exists(), + "the corrupted artifact ({}) should be deleted by the host", + artifact_path.path().display() + ); } #[tokio::test] diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index 112a529f62b..1e19f3b23fe 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -159,7 +159,9 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { // into individual fields of the structure. Thus, complex migrations shall be avoided when adding // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If // they show up, they must be clearly documented as mandatory ones. 
-#[derive(Clone, Debug, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize)] +#[derive( + Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize, +)] pub struct ExecutorParams(Vec); impl ExecutorParams { @@ -334,9 +336,3 @@ impl From<&[ExecutorParam]> for ExecutorParams { ExecutorParams(arr.to_vec()) } } - -impl Default for ExecutorParams { - fn default() -> Self { - ExecutorParams(vec![]) - } -} diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index e0984bd58d1..39a317a07c8 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -125,6 +125,14 @@ execution request: reason, which may or may not be independent of the candidate or PVF. 5. **Internal errors:** See "Internal Errors" section. In this case, after the retry we abstain from voting. +6. **RuntimeConstruction** error. The precheck handles a general case of a wrong + artifact but doesn't guarantee its consistency between the preparation and + the execution. If something happened with the artifact between + the preparation of the artifact and its execution (e.g. the artifact was + corrupted on disk or a dirty node upgrade happened when the prepare worker + has a wasmtime version different from the execute worker's wasmtime version). + We treat such an error as possibly transient due to local issues and retry + one time. ### Preparation timeouts diff --git a/prdoc/pr_3187.prdoc b/prdoc/pr_3187.prdoc new file mode 100644 index 00000000000..bda41142d86 --- /dev/null +++ b/prdoc/pr_3187.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Retrying an execution on failed runtime construction + +doc: + - audience: Node Dev + description: | + If a runtime construction error happened during the execution request, then the artifact is re-prepared + and the execution request is retried at most once. See also the related issue. + +crates: + - name: polkadot-node-core-candidate-validation + - name: polkadot-node-core-pvf + - name: polkadot-node-core-pvf-execute-worker + - name: polkadot-node-core-pvf-common -- GitLab From 576681b867a234b42aac282940455f9be71d108b Mon Sep 17 00:00:00 2001 From: Clara van Staden Date: Wed, 28 Feb 2024 21:42:35 +0200 Subject: [PATCH 18/86] Snowbridge - Extract Ethereum Chain ID (#3501) While adding runtime tests to https://github.com/polkadot-fellows/runtimes/pull/130, I noticed the Ethereum chain ID was hardcoded. For Kusama + Polkadot, the Ethereum chain ID should 1 (Mainnet), whereas on Rococo it is 11155111 (Sepolia). This PR also updates the Snowbridge crates versions to the current versions on crates.io. 
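A minimal, hypothetical sketch of the idea (the real helper names and signatures live in
`snowbridge-runtime-test-common` and are not shown in this excerpt): the Ethereum chain ID
becomes an explicit parameter of the shared test code rather than a hardcoded constant.

```rust
// Each runtime passes its own Ethereum chain id into the shared test helpers.
struct EthereumTestConfig {
	chain_id: u64,
}

fn test_config(ethereum_chain_id: u64) -> EthereumTestConfig {
	EthereumTestConfig { chain_id: ethereum_chain_id }
}

fn main() {
	let rococo = test_config(11_155_111); // Sepolia
	let polkadot = test_config(1); // Ethereum mainnet
	assert_ne!(rococo.chain_id, polkadot.chain_id);
}
```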
--------- Co-authored-by: claravanstaden --- Cargo.lock | 28 +++++++++---------- .../pallets/ethereum-client/Cargo.toml | 2 +- .../pallets/inbound-queue/Cargo.toml | 2 +- .../pallets/inbound-queue/fixtures/Cargo.toml | 2 +- .../pallets/outbound-queue/Cargo.toml | 2 +- .../outbound-queue/merkle-tree/Cargo.toml | 2 +- .../outbound-queue/runtime-api/Cargo.toml | 2 +- bridges/snowbridge/pallets/system/Cargo.toml | 2 +- .../pallets/system/runtime-api/Cargo.toml | 2 +- .../snowbridge/primitives/beacon/Cargo.toml | 2 +- bridges/snowbridge/primitives/core/Cargo.toml | 2 +- .../snowbridge/primitives/ethereum/Cargo.toml | 2 +- .../snowbridge/primitives/router/Cargo.toml | 2 +- .../runtime/runtime-common/Cargo.toml | 2 +- .../snowbridge/runtime/test-common/Cargo.toml | 2 +- .../snowbridge/runtime/test-common/src/lib.rs | 12 ++++++-- .../snowbridge/scripts/contribute-upstream.sh | 2 ++ .../bridge-hub-rococo/tests/snowbridge.rs | 5 ++++ 18 files changed, 45 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef510986257..197d05e2633 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17856,7 +17856,7 @@ dependencies = [ [[package]] name = "snowbridge-beacon-primitives" -version = "0.0.0" +version = "0.2.0" dependencies = [ "byte-slice-cast", "frame-support", @@ -17880,7 +17880,7 @@ dependencies = [ [[package]] name = "snowbridge-core" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-support", @@ -17903,7 +17903,7 @@ dependencies = [ [[package]] name = "snowbridge-ethereum" -version = "0.1.0" +version = "0.3.0" dependencies = [ "ethabi-decode", "ethbloom", @@ -17942,7 +17942,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-merkle-tree" -version = "0.1.1" +version = "0.3.0" dependencies = [ "array-bytes 4.2.0", "env_logger 0.9.3", @@ -17957,7 +17957,7 @@ dependencies = [ [[package]] name = "snowbridge-outbound-queue-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "parity-scale-codec", @@ -17971,7 +17971,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-ethereum-client" -version = "0.0.0" +version = "0.2.0" dependencies = [ "bp-runtime", "byte-slice-cast", @@ -18017,7 +18017,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue" -version = "0.0.0" +version = "0.2.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -18050,7 +18050,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-inbound-queue-fixtures" -version = "0.9.0" +version = "0.10.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -18064,7 +18064,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-outbound-queue" -version = "0.0.0" +version = "0.2.0" dependencies = [ "bridge-hub-common", "ethabi-decode", @@ -18089,7 +18089,7 @@ dependencies = [ [[package]] name = "snowbridge-pallet-system" -version = "0.0.0" +version = "0.2.0" dependencies = [ "ethabi-decode", "frame-benchmarking", @@ -18117,7 +18117,7 @@ dependencies = [ [[package]] name = "snowbridge-router-primitives" -version = "0.0.0" +version = "0.9.0" dependencies = [ "ethabi-decode", "frame-support", @@ -18140,7 +18140,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "frame-support", "frame-system", @@ -18156,7 +18156,7 @@ dependencies = [ [[package]] name = "snowbridge-runtime-test-common" -version = "0.0.0" +version = "0.2.0" dependencies = [ "assets-common", "bridge-hub-test-utils", @@ -18233,7 +18233,7 @@ dependencies = [ [[package]] name = 
"snowbridge-system-runtime-api" -version = "0.0.0" +version = "0.2.0" dependencies = [ "parity-scale-codec", "snowbridge-core", diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 99b42905311..c8999633c97 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-ethereum-client" description = "Snowbridge Ethereum Client Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index c26ef90a22d..b850496cd4e 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue" description = "Snowbridge Inbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index 61f1421e056..64605a42f0d 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-inbound-queue-fixtures" description = "Snowbridge Inbound Queue Test Fixtures" -version = "0.9.0" +version = "0.10.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 40e57db4bbd..f16a28cb1e4 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-outbound-queue" description = "Snowbridge Outbound Queue Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index c185d5af706..0606e9de330 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-merkle-tree" description = "Snowbridge Outbound Queue Merkle Tree" -version = "0.1.1" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index 347b3bae493..cb68fd0a250 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-outbound-queue-runtime-api" description = "Snowbridge Outbound Queue Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index f6c642e7376..5ad04290de0 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ 
b/bridges/snowbridge/pallets/system/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-pallet-system" description = "Snowbridge System Pallet" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 355d2d29147..eb02ae1db52 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-system-runtime-api" description = "Snowbridge System Runtime API" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index e021bb01071..d181fa1d394 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-beacon-primitives" description = "Snowbridge Beacon Primitives" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 090c48c403f..9a299ad0ae9 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-core" description = "Snowbridge Core" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index 399139cef38..9fa725a6c05 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-ethereum" description = "Snowbridge Ethereum" -version = "0.1.0" +version = "0.3.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 750a2a73e63..ded773e0d38 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-router-primitives" description = "Snowbridge Router Primitives" -version = "0.0.0" +version = "0.9.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index d4c86f8aa75..bf5e9a8832d 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-common" description = "Snowbridge Runtime Common" -version = "0.0.0" +version = "0.2.0" authors = ["Snowfork "] edition.workspace = true repository.workspace = true diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index ec4466b3426..4e8b311cb97 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" -version = "0.0.0" 
+version = "0.2.0" authors = ["Snowfork "] edition = "2021" license = "Apache-2.0" diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index 29b0e738c18..7455adf7617 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -40,6 +40,7 @@ where } pub fn send_transfer_token_message( + ethereum_chain_id: u64, assethub_parachain_id: u32, weth_contract_address: H160, destination_address: H160, @@ -89,7 +90,7 @@ where WithdrawAsset(Assets::from(vec![fee.clone()])), BuyExecution { fees: fee, weight_limit: Unlimited }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -107,6 +108,7 @@ where } pub fn send_transfer_token_message_success( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -149,6 +151,7 @@ pub fn send_transfer_token_message_success( initial_fund::(assethub_parachain_id, 5_000_000_000_000); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, @@ -205,6 +208,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< XcmConfig, AllPalletsWithoutSystem, >( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -249,6 +253,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< initial_fund::(assethub_parachain_id, 5_000_000_000_000); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, @@ -290,6 +295,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< } pub fn send_unpaid_transfer_token_message( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -353,7 +359,7 @@ pub fn send_unpaid_transfer_token_message( let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, ExportMessage { - network: Ethereum { chain_id: 11155111 }, + network: Ethereum { chain_id: ethereum_chain_id }, destination: Here, xcm: inner_xcm, }, @@ -375,6 +381,7 @@ pub fn send_unpaid_transfer_token_message( #[allow(clippy::too_many_arguments)] pub fn send_transfer_token_message_failure( + ethereum_chain_id: u64, collator_session_key: CollatorSessionKeys, runtime_para_id: u32, assethub_parachain_id: u32, @@ -414,6 +421,7 @@ pub fn send_transfer_token_message_failure( initial_fund::(assethub_parachain_id, initial_amount); let outcome = send_transfer_token_message::( + ethereum_chain_id, assethub_parachain_id, weth_contract_address, destination_address, diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh index 8aa2d2a7035..32005b770ec 100755 --- a/bridges/snowbridge/scripts/contribute-upstream.sh +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -40,10 +40,12 @@ git checkout "$branch_name" # remove everything we think is not required for our needs rm -rf rust-toolchain.toml +rm -rf codecov.yml rm -rf $SNOWBRIDGE_FOLDER/.cargo rm -rf $SNOWBRIDGE_FOLDER/.github rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/rustfmt.toml rm -rf $SNOWBRIDGE_FOLDER/templates rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index b9f43624b65..239bd946e75 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -51,6 +51,7 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys( + 11155111, collator_session_keys(), 1013, 1000, @@ -69,6 +70,7 @@ pub fn transfer_token_to_ethereum_works() { #[test] pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { snowbridge_runtime_test_common::send_unpaid_transfer_token_message::( + 11155111, collator_session_keys(), 1013, 1000, @@ -80,6 +82,7 @@ pub fn unpaid_transfer_token_to_ethereum_fails_with_barrier() { #[test] pub fn transfer_token_to_ethereum_fee_not_enough() { snowbridge_runtime_test_common::send_transfer_token_message_failure::( + 11155111, collator_session_keys(), 1013, 1000, @@ -95,6 +98,7 @@ pub fn transfer_token_to_ethereum_fee_not_enough() { #[test] pub fn transfer_token_to_ethereum_insufficient_fund() { snowbridge_runtime_test_common::send_transfer_token_message_failure::( + 11155111, collator_session_keys(), 1013, 1000, @@ -146,6 +150,7 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { XcmConfig, AllPalletsWithoutSystem, >( + 11155111, collator_session_keys(), 1013, 1000, -- GitLab From eefd5fe4499515da66d088505093f75b5aa22550 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 28 Feb 2024 20:49:00 +0100 Subject: [PATCH 19/86] Multi-Block-Migrations, `poll` hook and new System callbacks (#1781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This MR is the merge of https://github.com/paritytech/substrate/pull/14414 and https://github.com/paritytech/substrate/pull/14275. It implements [RFC#13](https://github.com/polkadot-fellows/RFCs/pull/13), closes https://github.com/paritytech/polkadot-sdk/issues/198. ----- This Merge request introduces three major topics: 1. Multi-Block-Migrations 1. New pallet `poll` hook for periodic service work 1. Replacement hooks for `on_initialize` and `on_finalize` in cases where `poll` cannot be used and some more general changes to FRAME. The changes for each topic span multiple crates. They are listed in topic order below. # 1.) Multi-Block-Migrations Multi-Block-Migrations are facilitated by creating `pallet_migrations` and configuring `System::Config::MultiBlockMigrator` to point to it. Executive picks this up and triggers one step of the migrations pallet per block. The chain is in lockdown mode for as long as an MBM is ongoing. Executive does this by polling `MultiBlockMigrator::ongoing` and, if true, not allowing any transactions in the block. An MBM is defined through the `SteppedMigration` trait. A condensed version looks like this (a minimal example implementation is sketched below, after the block-execution diagram): ```rust /// A migration that can proceed in multiple steps. pub trait SteppedMigration { type Cursor: FullCodec + MaxEncodedLen; type Identifier: FullCodec + MaxEncodedLen; fn id() -> Self::Identifier; fn max_steps() -> Option<u32>; fn step( cursor: Option<Self::Cursor>, meter: &mut WeightMeter, ) -> Result<Option<Self::Cursor>, SteppedMigrationError>; } ``` `pallet_migrations` can be configured with an aggregated tuple of these migrations. It then starts to migrate them one-by-one on the next runtime upgrade. Two things are important here: - 1.
Doing another runtime upgrade while MBMs are ongoing is not a good idea and can lead to messed up state. - 2. **Pallet Migrations MUST BE CONFIGURED IN `System::Config`, otherwise it is not used.** The pallet supports an `UpgradeStatusHandler` that can be used to notify external logic of upgrade start/finish (for example to pause XCM dispatch). Error recovery is very limited in the case that a migration errors or times out (exceeds its `max_steps`). Currently the runtime dev can decide in `FailedMigrationHandler::failed` how to handle this. One follow-up would be to pair this with the `SafeMode` pallet and enact safe mode when an upgrade fails, to allow governance to rescue the chain. This is currently not possible, since governance is not `Mandatory`. ## Runtime API - `Core`: `initialize_block` now returns `ExtrinsicInclusionMode` to inform the Block Author whether they can push transactions. ### Integration Add it to your runtime implementation of `Core` and `BlockBuilder`: ```patch diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs @@ impl_runtime_apis! { impl sp_block_builder::Core for Runtime { - fn initialize_block(header: &<Block as BlockT>::Header) { + fn initialize_block(header: &<Block as BlockT>::Header) -> ExtrinsicInclusionMode { Executive::initialize_block(header) } ... } ``` # 2.) `poll` hook A new pallet hook is introduced: `poll`. `poll` is intended to replace most usage of `on_initialize`. The reason for this is that any code that can be called from `on_initialize` cannot be migrated through an MBM. Currently there is no way to statically check this; the implication is to use `on_initialize` as rarely as possible. Failing to do so can result in broken storage invariants. The implementation of the poll hook depends on the `Runtime API` changes that are explained above. # 3.) Hard-Deadline callbacks Three new callbacks are introduced and configured on `System::Config`: `PreInherents`, `PostInherents` and `PostTransactions`. These hooks are meant as a replacement for `on_initialize` and `on_finalize` in cases where the code that runs cannot be moved to `poll`. The reason for this is to make the usage of HD-code (hard deadline) more explicit - again to prevent broken invariants by MBMs. # 4.) FRAME (general changes) ## `frame_system` pallet A new storage item `InherentsApplied` is added. It is used by Executive to track whether inherents have already been applied. Executive can then execute the MBMs directly between inherents and transactions. The `Config` gets five new items: - `SingleBlockMigrations`: the new way of configuring migrations that run in a single block. Previously they were defined as the last generic argument of `Executive`. This shift brings all central configuration about migrations closer into view of the developer (migrations that are configured in `Executive` still work for now, but this is deprecated). - `MultiBlockMigrator`: can be configured to an engine that drives MBMs. One example would be `pallet_migrations`. Note that this is only the engine; the exact MBMs are injected into it. - `PreInherents`: a callback that executes after `on_initialize` but before inherents. - `PostInherents`: a callback that executes after all inherents have run (including MBMs and `poll`). - `PostTransactions`: in symmetry to `PreInherents`, this one is called after all transactions but before `on_finalize`. A sane default is to set all of these to `()`.
Example diff suitable for any chain: ```patch @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } ``` An overview of what block execution now looks like is shown below. The same graph is also in the rust doc.
**Block Execution Flow**

![Screenshot 2023-12-04 at 19 11 29](https://github.com/paritytech/polkadot-sdk/assets/10380170/e88a80c4-ef11-4faa-8df5-8b33a724c054)
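For illustration only (not part of this PR): a minimal sketch of a migration written against the condensed `SteppedMigration` trait quoted in section 1. The struct, cursor layout, per-item weight and the `migrate_next_item` helper are hypothetical, the import paths are assumed to be `frame_support::migrations` and `frame_support::weights`, and the real trait ships with a few more provided items than the condensed form above.

```rust
use frame_support::{
    migrations::{SteppedMigration, SteppedMigrationError},
    weights::{Weight, WeightMeter},
};

/// Hypothetical migration that lazily re-encodes the values of a large storage map.
pub struct LazyMapMigration;

impl SteppedMigration for LazyMapMigration {
    /// The last map key that was migrated; `None` means "start from the beginning".
    type Cursor = [u8; 32];
    /// A fixed tag identifying this migration across runtime upgrades.
    type Identifier = [u8; 8];

    fn id() -> Self::Identifier {
        *b"lazymap1"
    }

    fn max_steps() -> Option<u32> {
        // Bound the number of blocks the migration may take, so that a stuck
        // cursor eventually surfaces as a failed migration.
        Some(100_000)
    }

    fn step(
        mut cursor: Option<Self::Cursor>,
        meter: &mut WeightMeter,
    ) -> Result<Option<Self::Cursor>, SteppedMigrationError> {
        // Assumed per-item weight; a real migration would take this from benchmarks.
        let weight_per_item = Weight::from_parts(10_000, 1024);

        // Migrate items until the weight granted to MBMs in this block is used up.
        while meter.try_consume(weight_per_item).is_ok() {
            match migrate_next_item(cursor.take()) {
                Some(last_key) => cursor = Some(last_key),
                // Nothing left to migrate: returning `None` signals completion.
                None => return Ok(None),
            }
        }
        // Out of weight for this block; resume from `cursor` in the next one.
        Ok(cursor)
    }
}

/// Stand-in for the real per-item state transition. It returns the key it just
/// migrated, or `None` once the (hypothetical) map is exhausted.
fn migrate_next_item(_last_migrated: Option<[u8; 32]>) -> Option<[u8; 32]> {
    // The sketch pretends the map is already empty so that it terminates.
    None
}
```

Such a migration (or an aggregated tuple of them) is what the `migrations` pallet would receive as its list of MBMs, with the pallet itself wired into `frame_system::Config::MultiBlockMigrator` as described above.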

## Inherent Order Moved to https://github.com/paritytech/polkadot-sdk/pull/2154 --------------- ## TODO - [ ] Check that `try-runtime` still works - [ ] Ensure backwards compatibility with old Runtime APIs - [x] Consume weight correctly - [x] Cleanup --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Liam Aharon Co-authored-by: Juan Girini Co-authored-by: command-bot <> Co-authored-by: Francisco Aguirre Co-authored-by: Gavin Wood Co-authored-by: Bastian KΓΆcher --- Cargo.lock | 110 +- Cargo.toml | 1 + .../pallets/inbound-queue/src/mock.rs | 13 +- .../pallets/outbound-queue/src/mock.rs | 14 +- bridges/snowbridge/pallets/system/src/mock.rs | 15 +- .../ethereum-beacon-client/src/mock.rs | 259 +++ cumulus/parachain-template/runtime/src/lib.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives-westend/src/lib.rs | 2 +- .../contracts/contracts-rococo/src/lib.rs | 2 +- .../coretime/coretime-rococo/src/lib.rs | 2 +- .../coretime/coretime-westend/src/lib.rs | 2 +- .../glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-rococo/src/lib.rs | 2 +- .../runtimes/people/people-westend/src/lib.rs | 2 +- .../runtimes/starters/seedling/src/lib.rs | 2 +- .../runtimes/starters/shell/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/lib.rs | 2 +- .../testing/rococo-parachain/src/lib.rs | 2 +- .../asset_hub_polkadot_aura.rs | 2 +- .../src/fake_runtime_api/aura.rs | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- polkadot/node/service/src/fake_runtime_api.rs | 2 +- polkadot/runtime/common/src/claims.rs | 29 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/pr_1781.prdoc | 45 + substrate/bin/minimal/runtime/src/lib.rs | 5 +- .../bin/node-template/runtime/src/lib.rs | 2 +- substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/src/lib.rs | 24 +- .../basic-authorship/src/basic_authorship.rs | 13 +- substrate/client/block-builder/src/lib.rs | 26 +- substrate/client/proposer-metrics/src/lib.rs | 4 + .../rpc-spec-v2/src/chain_head/tests.rs | 6 +- substrate/client/rpc/src/state/tests.rs | 2 +- substrate/frame/executive/Cargo.toml | 2 + substrate/frame/executive/src/lib.rs | 1194 +++----------- substrate/frame/executive/src/tests.rs | 1389 +++++++++++++++++ substrate/frame/migrations/Cargo.toml | 64 + .../frame/migrations/src/benchmarking.rs | 222 +++ substrate/frame/migrations/src/lib.rs | 746 +++++++++ substrate/frame/migrations/src/mock.rs | 163 ++ .../frame/migrations/src/mock_helpers.rs | 142 ++ substrate/frame/migrations/src/tests.rs | 335 ++++ substrate/frame/migrations/src/weights.rs | 358 +++++ substrate/frame/src/lib.rs | 2 +- .../src/construct_runtime/expand/inherent.rs | 63 +- .../procedural/src/pallet/expand/hooks.rs | 16 + substrate/frame/support/src/migrations.rs | 636 +++++++- .../support/src/storage/transactional.rs | 16 + substrate/frame/support/src/traits.rs | 11 +- substrate/frame/support/src/traits/hooks.rs | 70 + substrate/frame/support/src/traits/misc.rs | 17 +- .../undefined_inherent_part.stderr | 23 +- .../test/tests/pallet_outer_enums_explicit.rs | 1 + .../test/tests/pallet_outer_enums_implicit.rs | 1 + .../support/test/tests/runtime_metadata.rs | 6 +- substrate/frame/system/src/lib.rs | 74 +- substrate/frame/system/src/mock.rs | 16 + substrate/frame/system/src/tests.rs | 22 + 
.../api/proc-macro/src/decl_runtime_apis.rs | 1 + .../proc-macro/src/mock_impl_runtime_apis.rs | 2 +- substrate/primitives/api/src/lib.rs | 18 +- .../api/test/tests/decl_and_impl.rs | 2 +- .../ui/impl_incorrect_method_signature.rs | 2 +- .../api/test/tests/ui/impl_missing_version.rs | 2 +- .../test/tests/ui/missing_versioned_method.rs | 2 +- .../missing_versioned_method_multiple_vers.rs | 2 +- .../ui/positive_cases/custom_where_bound.rs | 2 +- .../tests/ui/positive_cases/default_impls.rs | 2 +- ...ype_reference_in_impl_runtime_apis_call.rs | 2 +- substrate/primitives/runtime/src/lib.rs | 10 + substrate/test-utils/runtime/src/lib.rs | 6 +- 78 files changed, 5072 insertions(+), 1188 deletions(-) create mode 100644 bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs create mode 100644 prdoc/pr_1781.prdoc create mode 100644 substrate/frame/executive/src/tests.rs create mode 100644 substrate/frame/migrations/Cargo.toml create mode 100644 substrate/frame/migrations/src/benchmarking.rs create mode 100644 substrate/frame/migrations/src/lib.rs create mode 100644 substrate/frame/migrations/src/mock.rs create mode 100644 substrate/frame/migrations/src/mock_helpers.rs create mode 100644 substrate/frame/migrations/src/tests.rs create mode 100644 substrate/frame/migrations/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index 197d05e2633..3838ed0e94a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -322,6 +322,20 @@ dependencies = [ "num-traits", ] +[[package]] +name = "aquamarine" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" +dependencies = [ + "include_dir", + "itertools 0.10.5", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "aquamarine" version = "0.5.0" @@ -4055,7 +4069,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", - "docify", + "docify 0.2.7", "frame-support", "frame-system", "log", @@ -4736,13 +4750,39 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "docify" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4" +dependencies = [ + "docify_macros 0.1.16", +] + [[package]] name = "docify" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" dependencies = [ - "docify_macros", + "docify_macros 0.2.7", +] + +[[package]] +name = "docify_macros" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7" +dependencies = [ + "common-path", + "derive-syn-parse", + "lazy_static", + "proc-macro2", + "quote", + "regex", + "syn 2.0.50", + "termcolor", + "walkdir", ] [[package]] @@ -5451,7 +5491,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" name = "frame" version = "0.0.1-dev" dependencies = [ - "docify", + "docify 0.2.7", "frame-executive", "frame-support", "frame-system", @@ -5619,6 +5659,7 @@ dependencies = [ name = "frame-executive" version = "28.0.0" dependencies = [ + "aquamarine 0.3.3", "array-bytes 6.1.0", "frame-support", "frame-system", @@ -5675,11 +5716,11 @@ 
dependencies = [ name = "frame-support" version = "28.0.0" dependencies = [ - "aquamarine", + "aquamarine 0.5.0", "array-bytes 6.1.0", "assert_matches", "bitflags 1.3.2", - "docify", + "docify 0.2.7", "environmental", "frame-metadata", "frame-support-procedural", @@ -5822,7 +5863,7 @@ version = "28.0.0" dependencies = [ "cfg-if", "criterion 0.4.0", - "docify", + "docify 0.2.7", "frame-support", "log", "parity-scale-codec", @@ -7081,6 +7122,7 @@ dependencies = [ "pallet-lottery", "pallet-membership", "pallet-message-queue", + "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", @@ -9301,8 +9343,8 @@ dependencies = [ name = "pallet-bags-list" version = "27.0.0" dependencies = [ - "aquamarine", - "docify", + "aquamarine 0.5.0", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -9350,7 +9392,7 @@ dependencies = [ name = "pallet-balances" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -9969,7 +10011,7 @@ dependencies = [ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ - "docify", + "docify 0.2.7", "frame-executive", "frame-support", "frame-system", @@ -10035,7 +10077,7 @@ dependencies = [ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -10232,6 +10274,30 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "pallet-migrations" +version = "1.0.0" +dependencies = [ + "docify 0.1.16", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "pretty_assertions", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "sp-version", +] + [[package]] name = "pallet-mixnet" version = "0.4.0" @@ -10507,7 +10573,7 @@ dependencies = [ name = "pallet-paged-list" version = "0.6.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10550,7 +10616,7 @@ dependencies = [ name = "pallet-parameters" version = "0.0.1" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10711,7 +10777,7 @@ dependencies = [ name = "pallet-safe-mode" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10768,7 +10834,7 @@ dependencies = [ name = "pallet-scheduler" version = "29.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -10981,7 +11047,7 @@ dependencies = [ name = "pallet-sudo" version = "28.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11012,7 +11078,7 @@ dependencies = [ name = "pallet-timestamp" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11116,7 +11182,7 @@ dependencies = [ name = "pallet-treasury" version = "27.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -11136,7 +11202,7 @@ dependencies = [ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ - "docify", + "docify 0.2.7", "frame-benchmarking", "frame-support", "frame-system", @@ -13404,7 +13470,7 @@ version = "0.0.1" dependencies = [ "cumulus-pallet-aura-ext", 
"cumulus-pallet-parachain-system", - "docify", + "docify 0.2.7", "frame", "frame-executive", "frame-support", @@ -15665,7 +15731,7 @@ name = "sc-chain-spec" version = "27.0.0" dependencies = [ "array-bytes 6.1.0", - "docify", + "docify 0.2.7", "log", "memmap2 0.9.3", "parity-scale-codec", @@ -18931,7 +18997,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ - "docify", + "docify 0.2.7", "either", "hash256-std-hasher", "impl-trait-for-tuples", diff --git a/Cargo.toml b/Cargo.toml index 654367cc1e2..48aa25f5c5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -352,6 +352,7 @@ members = [ "substrate/frame/membership", "substrate/frame/merkle-mountain-range", "substrate/frame/message-queue", + "substrate/frame/migrations", "substrate/frame/mixnet", "substrate/frame/multisig", "substrate/frame/nft-fractionalization", diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 110f611c676..749fb0367f3 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU128, ConstU32, Everything}, weights::IdentityFee, }; @@ -47,10 +47,9 @@ parameter_types! { type Balance = u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -60,16 +59,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index dd8fee4e2ed..6e78fb44672 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -3,7 +3,7 @@ use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{Everything, Hooks}, weights::IdentityFee, }; @@ -37,10 +37,9 @@ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -50,16 +49,7 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; - type DbWeight = (); - type Version = (); type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index edc3f141b07..de2970dd550 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -2,8 +2,8 @@ // SPDX-FileCopyrightText: 2023 Snowfork use crate as snowbridge_system; use frame_support::{ - parameter_types, - traits::{tokens::fungible::Mutate, ConstU128, ConstU16, ConstU64, ConstU8}, + derive_impl, parameter_types, + traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8}, weights::IdentityFee, PalletId, }; @@ -95,11 +95,9 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeTask = RuntimeTask; @@ -109,15 +107,8 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; - type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; type Nonce = u64; type Block = Block; } diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs new file mode 100644 index 00000000000..77b5c1aa631 --- /dev/null +++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 Snowfork +use crate as ethereum_beacon_client; +use frame_support::parameter_types; +use pallet_timestamp; +use primitives::{Fork, ForkVersions}; +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; + +#[cfg(not(feature = "beacon-spec-mainnet"))] +pub mod minimal { + use super::*; + + use crate::config; + use frame_support::derive_impl; + use hex_literal::hex; + use primitives::CompactExecutionHeader; + use snowbridge_core::inbound::{Log, Proof}; + use sp_runtime::BuildStorage; + use std::{fs::File, path::PathBuf}; + + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: 
ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ExecutionHeadersPruneThreshold: u32 = 10; + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 0, 1], // 0x00000001 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 0, 1], // 0x01000001 + epoch: 0, + }, + bellatrix: Fork { + version: [2, 0, 0, 1], // 0x02000001 + epoch: 0, + }, + capella: Fork { + version: [3, 0, 0, 1], // 0x03000001 + epoch: 0, + }, + }; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } + + fn load_fixture(basename: &str) -> Result + where + T: for<'de> serde::Deserialize<'de>, + { + let filepath: PathBuf = + [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect(); + serde_json::from_reader(File::open(filepath).unwrap()) + } + + pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate { + load_fixture("execution-header-update.minimal.json").unwrap() + } + + pub fn load_checkpoint_update_fixture( + ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> { + load_fixture("initial-checkpoint.minimal.json").unwrap() + } + + pub fn load_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("sync-committee-update.minimal.json").unwrap() + } + + pub fn load_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("finalized-header-update.minimal.json").unwrap() + } + + pub fn load_next_sync_committee_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-sync-committee-update.minimal.json").unwrap() + } + + pub fn load_next_finalized_header_update_fixture( + ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> { + load_fixture("next-finalized-header-update.minimal.json").unwrap() + } + + pub fn get_message_verification_payload() -> (Log, Proof) { + ( + Log { + address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(), + topics: vec![ + 
hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(), + hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(), + hex!("0000000000000000000000000000000000000000000000000000000000000001").into(), + ], + data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(), + }, + Proof { + block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(), + tx_index: 0, + data: (vec![ + hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(), + hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(), + hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(), + ], vec![ + hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(), + hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(), + hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(), + ]), + } + ) + } + + pub fn get_message_verification_header() -> CompactExecutionHeader { + CompactExecutionHeader { + parent_hash: 
hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04") + .into(), + block_number: 55, + state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3") + .into(), + receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb") + .into(), + } + } +} + +#[cfg(feature = "beacon-spec-mainnet")] +pub mod mainnet { + use super::*; + use frame_support::derive_impl; + + type Block = frame_system::mocking::MockBlock; + use sp_runtime::BuildStorage; + + frame_support::construct_runtime!( + pub enum Test { + System: frame_system::{Pallet, Call, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type PalletInfo = PalletInfo; + type SS58Prefix = SS58Prefix; + type Nonce = u64; + type Block = Block; + } + + impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = (); + type WeightInfo = (); + } + + parameter_types! { + pub const ChainForkVersions: ForkVersions = ForkVersions{ + genesis: Fork { + version: [0, 0, 16, 32], // 0x00001020 + epoch: 0, + }, + altair: Fork { + version: [1, 0, 16, 32], // 0x01001020 + epoch: 36660, + }, + bellatrix: Fork { + version: [2, 0, 16, 32], // 0x02001020 + epoch: 112260, + }, + capella: Fork { + version: [3, 0, 16, 32], // 0x03001020 + epoch: 162304, + }, + }; + pub const ExecutionHeadersPruneThreshold: u32 = 10; + } + + impl ethereum_beacon_client::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ForkVersions = ChainForkVersions; + type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold; + type WeightInfo = (); + } + + // Build genesis storage according to the mock runtime. + pub fn new_tester() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000)); + ext + } +} diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index cee9b33bf37..004a5d70ebc 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -560,7 +560,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 6791dc4064e..1c4f49fea0e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1093,7 +1093,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 5352eeb1a1b..e015c7bfca3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1124,7 +1124,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index db1dfbd45d6..0f6af1b51f1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -816,7 +816,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 1c8c9aa2075..206743ba6ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -566,7 +566,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 36eb0e07f87..3ed9d6e8e16 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -803,7 +803,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 541978098ca..c4df5b9a565 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -466,7 +466,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index fcd7576b1e1..0cdfebce742 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -521,7 +521,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 80eb7863803..7797329b526 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -512,7 +512,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 10408aaf39a..e09ad42ea9f 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -332,7 +332,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 90c39891769..616da5519a5 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -495,7 +495,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index a904f7c3521..9f572330cac 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -495,7 +495,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index ba077ef8879..4cc0a81ef49 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -300,7 +300,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 457394760d9..829754731a4 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -357,7 +357,7 @@ impl_runtime_apis! 
{ Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index bf8dcbc24c8..0030287edb3 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -699,7 +699,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 57969d9a4f1..b69c2341d6b 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -689,7 +689,7 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs index 76dd7347ccb..7778d1bf7d2 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs index 0f01b85ebcf..880f5d760c7 100644 --- a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -39,7 +39,7 @@ sp_api::impl_runtime_apis! { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5fb31410984..5ccec8983e9 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -377,7 +377,7 @@ impl_runtime_apis! { Executive::execute_block(block) } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index ccc3da22400..085ea93fdc7 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -60,7 +60,7 @@ sp_api::impl_runtime_apis! 
{ unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 86550ea8b4e..68f42914e44 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -702,7 +702,6 @@ mod tests { use secp_utils::*; use parity_scale_codec::Encode; - use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use crate::claims; @@ -715,11 +714,8 @@ mod tests { }; use pallet_balances; use sp_runtime::{ - traits::{BlakeTwo256, Identity, IdentityLookup}, - transaction_validity::TransactionLongevity, - BuildStorage, - DispatchError::BadOrigin, - TokenError, + traits::Identity, transaction_validity::TransactionLongevity, BuildStorage, + DispatchError::BadOrigin, TokenError, }; type Block = frame_system::mocking::MockBlock; @@ -734,34 +730,13 @@ mod tests { } ); - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index bbc79a13d37..173301e1ad9 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1768,7 +1768,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b74def5de8a..6c899c52701 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -767,7 +767,7 @@ sp_api::impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 574710b3110..91047d953f5 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1793,7 +1793,7 @@ sp_api::impl_runtime_apis! 
{
		Executive::execute_block(block);
	}

-	fn initialize_block(header: &<Block as BlockT>::Header) {
+	fn initialize_block(header: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
		Executive::initialize_block(header)
	}
}
diff --git a/prdoc/pr_1781.prdoc b/prdoc/pr_1781.prdoc
new file mode 100644
index 00000000000..e3560842d15
--- /dev/null
+++ b/prdoc/pr_1781.prdoc
@@ -0,0 +1,45 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Multi-Block-Migrations, `poll` hook and new System Callbacks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The major things that this MR touches are:
+
+      **Multi-Block-Migrations**: `pallet-migrations` is introduced that can be configured in the
+      `System` of a runtime to act as a multi-block migrator. The `migrations` pallet then in turn
+      receives the list of MBMs as a config parameter. The list of migrations can be an aggregated
+      tuple of `SteppedMigration` trait implementations.
+      It is paramount that the `migrations` pallet is configured in `System` once it is deployed. A
+      test is in place to double-check this.
+
+      To integrate this into your runtime, it is only necessary to change the return type of
+      `initialize_block` to `sp_runtime::ExtrinsicInclusionMode`. For extended info please see
+      https://github.com/paritytech/polkadot-sdk/pull/1781.
+
+      **poll**: a new pallet hook named `poll` is added. It can be used for code whose
+      execution is not deadline critical. Runtime devs are advised to skim their usage of
+      `on_initialize` and `on_finalize` to see whether it can be replaced with `poll`. `poll` is
+      not guaranteed to be called each block; in fact it will not be called while MBMs are ongoing.
+
+      **System Callbacks**: The `system` pallet gets five new config items - all of which can be
+      safely set to `()` as default. They are:
+      - `SingleBlockMigrations`: replaces the `Executive` as the place to configure single-block migrations.
+      - `MultiBlockMigrator`: the `pallet-migrations` pallet is set here, if deployed.
+      - `PreInherents`: a hook that runs before any inherent.
+      - `PostInherents`: a hook that runs between the inherents and the `poll`/MBM logic.
+      - `PostTransactions`: a hook that runs after all transactions but before `on_idle`.
+
+crates:
+  - name: frame-executive
+  - name: frame-system
+  - name: frame-support
+  - name: frame-support-procedural
+  - name: pallet-migrations
+  - name: sc-basic-authorship
+  - name: sc-block-builder
+  - name: sp-api
+  - name: sp-api-proc-macro
+  - name: sp-runtime
diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/substrate/bin/minimal/runtime/src/lib.rs
index 610289693d9..fb996aef46b 100644
--- a/substrate/bin/minimal/runtime/src/lib.rs
+++ b/substrate/bin/minimal/runtime/src/lib.rs
@@ -26,7 +26,8 @@ use frame::{
	prelude::*,
	runtime::{
		apis::{
-			self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult, OpaqueMetadata,
+			self, impl_runtime_apis, ApplyExtrinsicResult, CheckInherentsResult,
+			ExtrinsicInclusionMode, OpaqueMetadata,
		},
		prelude::*,
	},
@@ -121,7 +122,7 @@ impl_runtime_apis!
{ RuntimeExecutive::execute_block(block) } - fn initialize_block(header: &Header) { + fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { RuntimeExecutive::initialize_block(header) } } diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 3b6a74be251..159697f427f 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -331,7 +331,7 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index aa13c3d292b..9607d05daf0 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -88,6 +88,7 @@ pallet-election-provider-support-benchmarking = { path = "../../../frame/electio pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false } pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } +pallet-migrations = { path = "../../../frame/migrations", default-features = false } pallet-nis = { path = "../../../frame/nis", default-features = false } pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } pallet-im-online = { path = "../../../frame/im-online", default-features = false } @@ -198,6 +199,7 @@ std = [ "pallet-lottery/std", "pallet-membership/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-mixnet/std", "pallet-mmr/std", "pallet-multisig/std", @@ -302,6 +304,7 @@ runtime-benchmarks = [ "pallet-lottery/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-mixnet/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", @@ -381,6 +384,7 @@ try-runtime = [ "pallet-lottery/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-mixnet/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index b34d8c89e56..5431628c747 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -310,6 +310,7 @@ impl frame_system::Config for Runtime { type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; type MaxConsumers = ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } impl pallet_insecure_randomness_collective_flip::Config for Runtime {} @@ -2007,6 +2008,25 @@ impl pallet_statement::Config for Runtime { type MaxAllowedBytes = MaxAllowedBytes; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + // Benchmarks need mocked migrations to guarantee that they succeed. 
+ #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = pallet_migrations::weights::SubstrateWeight; +} + parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); } @@ -2241,6 +2261,7 @@ construct_runtime!( TxPause: pallet_tx_pause, SafeMode: pallet_safe_mode, Statement: pallet_statement, + MultiBlockMigrations: pallet_migrations, Broker: pallet_broker, TasksExample: pallet_example_tasks, Mixnet: pallet_mixnet, @@ -2372,6 +2393,7 @@ mod benches { [pallet_lottery, Lottery] [pallet_membership, TechnicalMembership] [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_nomination_pools, NominationPoolsBench::] @@ -2417,7 +2439,7 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header) } } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index fdc3d4918de..932287ac865 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -38,7 +38,7 @@ use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, - Digest, Percent, SaturatedConversion, + Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, }; use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; @@ -335,11 +335,12 @@ where self.apply_inherents(&mut block_builder, inherent_data)?; - // TODO call `after_inherents` and check if we should apply extrinsincs here - // - - let end_reason = - self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?; + let mode = block_builder.extrinsic_inclusion_mode(); + let end_reason = match mode { + ExtrinsicInclusionMode::AllExtrinsics => + self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?, + ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden, + }; let (block, storage_changes, proof) = block_builder.build()?.into_inner(); let block_took = block_timer.elapsed(); diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 258e39d962b..2f22cd42591 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::traits::CallContext; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, - Digest, + Digest, ExtrinsicInclusionMode, }; use std::marker::PhantomData; @@ -198,10 +198,12 @@ pub struct BlockBuilder<'a, Block: BlockT, C: ProvideRuntimeApi + 'a> { extrinsics: Vec, api: ApiRef<'a, C::Api>, call_api_at: &'a C, + /// Version of the [`BlockBuilderApi`] runtime API. version: u32, parent_hash: Block::Hash, /// The estimated size of the block header. 
	estimated_header_size: usize,
+	extrinsic_inclusion_mode: ExtrinsicInclusionMode,
 }

 impl<'a, Block, C> BlockBuilder<'a, Block, C>
@@ -244,9 +246,19 @@ where

		api.set_call_context(CallContext::Onchain);

-		api.initialize_block(parent_hash, &header)?;
+		let core_version = api
+			.api_version::<dyn Core<Block>>(parent_hash)?
+			.ok_or_else(|| Error::VersionInvalid("Core".to_string()))?;

-		let version = api
+		let extrinsic_inclusion_mode = if core_version >= 5 {
+			api.initialize_block(parent_hash, &header)?
+		} else {
+			#[allow(deprecated)]
+			api.initialize_block_before_version_5(parent_hash, &header)?;
+			ExtrinsicInclusionMode::AllExtrinsics
+		};
+
+		let bb_version = api
			.api_version::<dyn BlockBuilderApi<Block>>(parent_hash)?
			.ok_or_else(|| Error::VersionInvalid("BlockBuilderApi".to_string()))?;

@@ -254,12 +266,18 @@ where
			parent_hash,
			extrinsics: Vec::new(),
			api,
-			version,
+			version: bb_version,
			estimated_header_size,
			call_api_at,
+			extrinsic_inclusion_mode,
		})
	}

+	/// The extrinsic inclusion mode of the runtime for this block.
+	pub fn extrinsic_inclusion_mode(&self) -> ExtrinsicInclusionMode {
+		self.extrinsic_inclusion_mode
+	}
+
	/// Push onto the block's list of extrinsics.
	///
	/// This will ensure the extrinsic can be validly executed (by executing it).
diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs
index 012e8ca769a..2856300cf80 100644
--- a/substrate/client/proposer-metrics/src/lib.rs
+++ b/substrate/client/proposer-metrics/src/lib.rs
@@ -44,11 +44,14 @@ impl MetricsLink {
 }

 /// The reason why proposing a block ended.
+#[derive(Clone, Copy, PartialEq, Eq)]
 pub enum EndProposingReason {
	NoMoreTransactions,
	HitDeadline,
	HitBlockSizeLimit,
	HitBlockWeightLimit,
+	/// No transactions are allowed in the block.
+	TransactionForbidden,
 }

 /// Authorship metrics.
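// A hedged, self-contained sketch (not part of the diff) of the branch that the block author
// gains above: pool transactions are only applied when `BlockBuilder::extrinsic_inclusion_mode()`
// reports `AllExtrinsics`; otherwise proposing ends early with the new `TransactionForbidden`
// reason. `EndReason` and `apply_pool_transactions` are illustrative stand-ins for
// `EndProposingReason` and `apply_extrinsics`; only `ExtrinsicInclusionMode` comes from the patch.

use sp_runtime::ExtrinsicInclusionMode;

#[derive(Debug, PartialEq, Eq)]
enum EndReason {
	NoMoreTransactions,
	TransactionForbidden,
}

fn end_reason(
	mode: ExtrinsicInclusionMode,
	apply_pool_transactions: impl FnOnce() -> EndReason,
) -> EndReason {
	match mode {
		// Normal block: inherents plus as many pool transactions as fit.
		ExtrinsicInclusionMode::AllExtrinsics => apply_pool_transactions(),
		// A multi-block migration is ongoing: the block has to stay inherents-only.
		ExtrinsicInclusionMode::OnlyInherents => EndReason::TransactionForbidden,
	}
}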
@@ -112,6 +115,7 @@ impl Metrics { EndProposingReason::NoMoreTransactions => "no_more_transactions", EndProposingReason::HitBlockSizeLimit => "hit_block_size_limit", EndProposingReason::HitBlockWeightLimit => "hit_block_weight_limit", + EndProposingReason::TransactionForbidden => "transactions_forbidden", }; self.end_proposing_reason.with_label_values(&[reason]).inc(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 9544736d84c..89d8c4ce271 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -242,12 +242,12 @@ async fn follow_with_runtime() { let event: FollowEvent = get_next_event(&mut sub).await; // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION - let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":0,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + let runtime_str = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\ [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ - [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":0}"; + [\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"stateVersion\":1}"; let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap(); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 96f4c1be960..dd866e671c5 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -475,7 +475,7 @@ async fn should_return_runtime_version() { // it is basically json-encoded substrate_test_runtime_client::runtime::VERSION let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\ [\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\ [\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\ [\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\ diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index a4ca265f617..63285e4cb49 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -16,6 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +aquamarine = "0.3.2" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } @@ -44,6 +45,7 @@ default = ["std"] with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", + "frame-support/experimental", "frame-support/std", "frame-system/std", "frame-try-runtime/std", diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 48ff675f808..3028eaf318e 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the 
License. +#![cfg_attr(not(feature = "std"), no_std)] + //! # Executive Module //! //! The Executive module acts as the orchestration layer for the runtime. It dispatches incoming @@ -35,6 +37,8 @@ //! - Finalize a block. //! - Start an off-chain worker. //! +//! The flow of their application in a block is explained in the [block flowchart](block_flowchart). +//! //! ### Implementations //! //! The Executive module provides the following implementations: @@ -114,17 +118,51 @@ //! pub type Executive = executive::Executive; //! ``` -#![cfg_attr(not(feature = "std"), no_std)] +#[cfg(doc)] +#[cfg_attr(doc, aquamarine::aquamarine)] +/// # Block Execution +/// +/// These are the steps of block execution as done by [`Executive::execute_block`]. A block is +/// invalid if any of them fail. +/// +/// ```mermaid +/// flowchart TD +/// Executive::execute_block --> on_runtime_upgrade +/// on_runtime_upgrade --> System::initialize +/// Executive::initialize_block --> System::initialize +/// System::initialize --> on_initialize +/// on_initialize --> PreInherents[System::PreInherents] +/// PreInherents --> Inherents[Apply Inherents] +/// Inherents --> PostInherents[System::PostInherents] +/// PostInherents --> Check{MBM ongoing?} +/// Check -->|No| poll +/// Check -->|Yes| post_transactions_2[System::PostTransaction] +/// post_transactions_2 --> Step[MBMs::step] +/// Step --> on_finalize +/// poll --> transactions[Apply Transactions] +/// transactions --> post_transactions_1[System::PostTransaction] +/// post_transactions_1 --> CheckIdle{Weight remaining?} +/// CheckIdle -->|Yes| on_idle +/// CheckIdle -->|No| on_finalize +/// on_idle --> on_finalize +/// ``` +pub mod block_flowchart {} + +#[cfg(test)] +mod tests; use codec::{Codec, Encode}; use frame_support::{ + defensive_assert, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, + migrations::MultiStepMigrator, pallet_prelude::InvalidTransaction, traits::{ BeforeAllRuntimeMigrations, EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, - OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnFinalize, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, PostInherents, + PostTransactions, PreInherents, }, - weights::Weight, + weights::{Weight, WeightMeter}, }; use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::{ @@ -134,7 +172,7 @@ use sp_runtime::{ ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, + ApplyExtrinsicResult, ExtrinsicInclusionMode, }; use sp_std::{marker::PhantomData, prelude::*}; @@ -198,7 +236,8 @@ impl< + OnInitialize> + OnIdle> + OnFinalize> - + OffchainWorker>, + + OffchainWorker> + + OnPoll>, COnRuntimeUpgrade: OnRuntimeUpgrade, > ExecuteBlock for Executive @@ -237,6 +276,7 @@ impl< + OnIdle> + OnFinalize> + OffchainWorker> + + OnPoll> + TryState> + TryDecodeEntireStorage, COnRuntimeUpgrade: OnRuntimeUpgrade, @@ -272,36 +312,50 @@ where select, ); - Self::initialize_block(block.header()); - Self::initial_checks(&block); - + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; let (header, extrinsics) = block.deconstruct(); + // Check if there are any forbidden non-inherents in the block. 
+ if mode == ExtrinsicInclusionMode::OnlyInherents && extrinsics.len() > num_inherents { + return Err("Only inherents allowed".into()) + } + let try_apply_extrinsic = |uxt: Block::Extrinsic| -> ApplyExtrinsicResult { sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); + let is_inherent = System::is_inherent(&uxt); // skip signature verification. let xt = if signature_check { uxt.check(&Default::default()) } else { uxt.unchecked_into_checked_i_know_what_i_am_doing(&Default::default()) }?; - >::note_extrinsic(encoded); let dispatch_info = xt.get_dispatch_info(); + if !is_inherent && !>::inherents_applied() { + Self::inherents_applied(); + } + + >::note_extrinsic(encoded); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; + if r.is_err() && dispatch_info.class == DispatchClass::Mandatory { + return Err(InvalidTransaction::BadMandatory.into()) + } + >::note_applied_extrinsic(&r, dispatch_info); Ok(r.map(|_| ()).map_err(|e| e.error)) }; - for e in extrinsics { + // Apply extrinsics: + for e in extrinsics.iter() { if let Err(err) = try_apply_extrinsic(e.clone()) { log::error!( - target: LOG_TARGET, "executing transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", + target: LOG_TARGET, "transaction {:?} failed due to {:?}. Aborting the rest of the block execution.", e, err, ); @@ -309,9 +363,17 @@ where } } + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + Self::inherents_applied(); + } + // post-extrinsics book-keeping >::note_finished_extrinsics(); - Self::idle_and_finalize_hook(*header.number()); + ::PostTransactions::post_transactions(); + + Self::on_idle_hook(*header.number()); + Self::on_finalize_hook(*header.number()); // run the try-state checks of all pallets, ensuring they don't alter any state. let _guard = frame_support::StorageNoopGuard::default(); @@ -449,7 +511,8 @@ impl< + OnInitialize> + OnIdle> + OnFinalize> - + OffchainWorker>, + + OffchainWorker> + + OnPoll>, COnRuntimeUpgrade: OnRuntimeUpgrade, > Executive where @@ -464,16 +527,36 @@ where pub fn execute_on_runtime_upgrade() -> Weight { let before_all_weight = ::before_all_runtime_migrations(); - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() - .saturating_add(before_all_weight) + + let runtime_upgrade_weight = <( + COnRuntimeUpgrade, + ::SingleBlockMigrations, + // We want to run the migrations before we call into the pallets as they may + // access any state that would then not be migrated. + AllPalletsWithSystem, + ) as OnRuntimeUpgrade>::on_runtime_upgrade(); + + before_all_weight.saturating_add(runtime_upgrade_weight) } /// Start the execution of a particular block. 
- pub fn initialize_block(header: &frame_system::pallet_prelude::HeaderFor) { + pub fn initialize_block( + header: &frame_system::pallet_prelude::HeaderFor, + ) -> ExtrinsicInclusionMode { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); let digests = Self::extract_pre_digest(header); Self::initialize_block_impl(header.number(), header.parent_hash(), &digests); + + Self::extrinsic_mode() + } + + fn extrinsic_mode() -> ExtrinsicInclusionMode { + if ::MultiBlockMigrator::ongoing() { + ExtrinsicInclusionMode::OnlyInherents + } else { + ExtrinsicInclusionMode::AllExtrinsics + } } fn extract_pre_digest(header: &frame_system::pallet_prelude::HeaderFor) -> Digest { @@ -519,6 +602,7 @@ where ); frame_system::Pallet::::note_finished_initialize(); + ::PreInherents::pre_inherents(); } /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. @@ -529,7 +613,8 @@ where last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } - fn initial_checks(block: &Block) { + /// Returns the number of inherents in the block. + fn initial_checks(block: &Block) -> u32 { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks"); let header = block.header(); @@ -542,8 +627,9 @@ where "Parent hash should be valid.", ); - if let Err(i) = System::ensure_inherents_are_first(block) { - panic!("Invalid inherent position for extrinsic at index {}", i); + match System::ensure_inherents_are_first(block) { + Ok(num) => num, + Err(i) => panic!("Invalid inherent position for extrinsic at index {}", i), } } @@ -552,53 +638,90 @@ where sp_io::init_tracing(); sp_tracing::within_span! { sp_tracing::info_span!("execute_block", ?block); + // Execute `on_runtime_upgrade` and `on_initialize`. + let mode = Self::initialize_block(block.header()); + let num_inherents = Self::initial_checks(&block) as usize; + let (header, extrinsics) = block.deconstruct(); + let num_extrinsics = extrinsics.len(); - Self::initialize_block(block.header()); + if mode == ExtrinsicInclusionMode::OnlyInherents && num_extrinsics > num_inherents { + // Invalid block + panic!("Only inherents are allowed in this block") + } - // any initial checks - Self::initial_checks(&block); + Self::apply_extrinsics(extrinsics.into_iter()); - // execute extrinsics - let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + defensive_assert!(num_inherents == num_extrinsics); + Self::inherents_applied(); + } - // any final checks + >::note_finished_extrinsics(); + ::PostTransactions::post_transactions(); + + Self::on_idle_hook(*header.number()); + Self::on_finalize_hook(*header.number()); Self::final_checks(&header); } } - /// Execute given extrinsics and take care of post-extrinsics book-keeping. - fn execute_extrinsics_with_book_keeping( - extrinsics: Vec, - block_number: NumberFor, - ) { + /// Logic that runs directly after inherent application. + /// + /// It advances the Multi-Block-Migrations or runs the `on_poll` hook. + pub fn inherents_applied() { + >::note_inherents_applied(); + ::PostInherents::post_inherents(); + + if ::MultiBlockMigrator::ongoing() { + let used_weight = ::MultiBlockMigrator::step(); + >::register_extra_weight_unchecked( + used_weight, + DispatchClass::Mandatory, + ); + } else { + let block_number = >::block_number(); + Self::on_poll_hook(block_number); + } + } + + /// Execute given extrinsics. 
+ fn apply_extrinsics(extrinsics: impl Iterator) { extrinsics.into_iter().for_each(|e| { if let Err(e) = Self::apply_extrinsic(e) { let err: &'static str = e.into(); panic!("{}", err) } }); - - // post-extrinsics book-keeping - >::note_finished_extrinsics(); - - Self::idle_and_finalize_hook(block_number); } /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. + // Note: Only used by the block builder - not Executive itself. pub fn finalize_block() -> frame_system::pallet_prelude::HeaderFor { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block"); - >::note_finished_extrinsics(); - let block_number = >::block_number(); - Self::idle_and_finalize_hook(block_number); + // In this case there were no transactions to trigger this state transition: + if !>::inherents_applied() { + Self::inherents_applied(); + } + >::note_finished_extrinsics(); + ::PostTransactions::post_transactions(); + let block_number = >::block_number(); + Self::on_idle_hook(block_number); + Self::on_finalize_hook(block_number); >::finalize() } - fn idle_and_finalize_hook(block_number: NumberFor) { + /// Run the `on_idle` hook of all pallet, but only if there is weight remaining and there are no + /// ongoing MBMs. + fn on_idle_hook(block_number: NumberFor) { + if ::MultiBlockMigrator::ongoing() { + return + } + let weight = >::block_weight(); let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); @@ -613,7 +736,33 @@ where DispatchClass::Mandatory, ); } + } + + fn on_poll_hook(block_number: NumberFor) { + defensive_assert!( + !::MultiBlockMigrator::ongoing(), + "on_poll should not be called during migrations" + ); + let weight = >::block_weight(); + let max_weight = >::get().max_block; + let remaining = max_weight.saturating_sub(weight.total()); + + if remaining.all_gt(Weight::zero()) { + let mut meter = WeightMeter::with_limit(remaining); + >>::on_poll( + block_number, + &mut meter, + ); + >::register_extra_weight_unchecked( + meter.consumed(), + DispatchClass::Mandatory, + ); + } + } + + /// Run the `on_finalize` hook of all pallet. + fn on_finalize_hook(block_number: NumberFor) { >>::on_finalize(block_number); } @@ -627,8 +776,18 @@ where let encoded_len = encoded.len(); sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic", ext=?sp_core::hexdisplay::HexDisplay::from(&encoded))); + + // We use the dedicated `is_inherent` check here, since just relying on `Mandatory` dispatch + // class does not capture optional inherents. + let is_inherent = System::is_inherent(&uxt); + // Verify that the signature is good. let xt = uxt.check(&Default::default())?; + let dispatch_info = xt.get_dispatch_info(); + + if !is_inherent && !>::inherents_applied() { + Self::inherents_applied(); + } // We don't need to make sure to `note_extrinsic` only after we know it's going to be // executed to prevent it from leaking in storage since at this point, it will either @@ -637,8 +796,6 @@ where // AUDIT: Under no circumstances may this function panic from here onwards. - // Decode parameters and dispatch - let dispatch_info = xt.get_dispatch_info(); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; // Mandatory(inherents) are not allowed to fail. 
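// A hedged, self-contained sketch (not part of the diff) of the weight bookkeeping performed by
// `on_poll_hook` above: `poll` only receives the weight still left in the block, and what it
// actually consumed is then registered as `Mandatory`. The closure stands in for
// `AllPalletsWithSystem::on_poll`; `Weight` and `WeightMeter` are the types the patch imports.

use frame_support::weights::{Weight, WeightMeter};

fn run_poll_with_leftover(
	max_block: Weight,
	already_used: Weight,
	mut poll: impl FnMut(&mut WeightMeter),
) -> Weight {
	let remaining = max_block.saturating_sub(already_used);
	if !remaining.all_gt(Weight::zero()) {
		// No weight left: `poll` is simply skipped, it has no execution guarantee.
		return Weight::zero()
	}

	let mut meter = WeightMeter::with_limit(remaining);
	poll(&mut meter);
	// This is the value that `register_extra_weight_unchecked` receives in `on_poll_hook`.
	meter.consumed()
}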
@@ -745,956 +902,3 @@ where ) } } - -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::H256; - use sp_runtime::{ - generic::{DigestItem, Era}, - testing::{Block, Digest, Header}, - traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, IdentityLookup}, - transaction_validity::{ - InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, - }, - BuildStorage, DispatchError, - }; - - use frame_support::{ - assert_err, derive_impl, parameter_types, - traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, - weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, - }; - use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment::CurrencyAdapter; - - const TEST_KEY: &[u8] = b":test:key:"; - - #[frame_support::pallet(dev_mode)] - mod custom { - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::hooks] - impl Hooks> for Pallet { - // module hooks. - // one with block number arg and one without - fn on_initialize(n: BlockNumberFor) -> Weight { - println!("on_initialize({})", n); - Weight::from_parts(175, 0) - } - - fn on_idle(n: BlockNumberFor, remaining_weight: Weight) -> Weight { - println!("on_idle{}, {})", n, remaining_weight); - Weight::from_parts(175, 0) - } - - fn on_finalize(n: BlockNumberFor) { - println!("on_finalize({})", n); - } - - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - Weight::from_parts(200, 0) - } - - fn offchain_worker(n: BlockNumberFor) { - assert_eq!(BlockNumberFor::::from(1u32), n); - } - } - - #[pallet::call] - impl Pallet { - pub fn some_function(origin: OriginFor) -> DispatchResult { - // NOTE: does not make any different. - frame_system::ensure_signed(origin)?; - Ok(()) - } - - #[pallet::weight((200, DispatchClass::Operational))] - pub fn some_root_operation(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { - frame_system::ensure_root(origin)?; - Ok(()) - } - - #[pallet::weight((0, DispatchClass::Mandatory))] - pub fn inherent_call(origin: OriginFor) -> DispatchResult { - frame_system::ensure_none(origin)?; - Ok(()) - } - - pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { - let root = sp_io::storage::root(sp_runtime::StateVersion::V1); - sp_io::storage::set("storage_root".as_bytes(), &root); - Ok(()) - } - } - - #[pallet::inherent] - impl ProvideInherent for Pallet { - type Call = Call; - - type Error = sp_inherents::MakeFatalError<()>; - - const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; - - fn create_inherent(_data: &InherentData) -> Option { - None - } - - fn is_inherent(call: &Self::Call) -> bool { - *call == Call::::inherent_call {} - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - // Inherent call is accepted for being dispatched - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - match call { - Call::allowed_unsigned { .. 
} => Ok(()), - Call::inherent_call { .. } => Ok(()), - _ => Err(UnknownTransaction::NoUnsignedValidator.into()), - } - } - - // Inherent call is not validated as unsigned - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - match call { - Call::allowed_unsigned { .. } => Ok(Default::default()), - _ => UnknownTransaction::NoUnsignedValidator.into(), - } - } - } - } - - frame_support::construct_runtime!( - pub enum Runtime { - System: frame_system, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - Custom: custom, - } - ); - - parameter_types! { - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::builder() - .base_block(Weight::from_parts(10, 0)) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) - .build_or_panic(); - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 10, - write: 100, - }; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = sp_core::H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = TestBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = RuntimeVersion; - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - } - - type Balance = u64; - impl pallet_balances::Config for Runtime { - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ConstU64<1>; - type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); - type RuntimeFreezeReason = (); - } - - parameter_types! { - pub const TransactionByteFee: Balance = 0; - } - impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = CurrencyAdapter; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = (); - } - impl custom::Config for Runtime {} - - pub struct RuntimeVersion; - impl frame_support::traits::Get for RuntimeVersion { - fn get() -> sp_version::RuntimeVersion { - RuntimeVersionTestValues::get().clone() - } - } - - parameter_types! { - pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = - Default::default(); - } - - type SignedExtra = ( - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ); - type TestXt = sp_runtime::testing::TestXt; - type TestBlock = Block; - - // Will contain `true` when the custom runtime logic was called. 
- const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; - - struct CustomOnRuntimeUpgrade; - impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); - sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); - System::deposit_event(frame_system::Event::CodeUpdated); - - assert_eq!(0, System::last_runtime_upgrade_spec_version()); - - Weight::from_parts(100, 0) - } - } - - type Executive = super::Executive< - Runtime, - Block, - ChainContext, - Runtime, - AllPalletsWithSystem, - CustomOnRuntimeUpgrade, - >; - - fn extra(nonce: u64, fee: Balance) -> SignedExtra { - ( - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(fee), - ) - } - - fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) - } - - fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) - } - - #[test] - fn balance_transfer_dispatch_works() { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } - .assimilate_storage(&mut t) - .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - let fee: Balance = - ::WeightToFee::weight_to_fee(&weight); - let mut t = sp_io::TestExternalities::new(t); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - let r = Executive::apply_extrinsic(xt); - assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - fee); - assert_eq!(>::total_balance(&2), 69); - }); - } - - fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - t.into() - } - - fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } - .assimilate_storage(&mut t) - .unwrap(); - (t, sp_runtime::StateVersion::V0).into() - } - - #[test] - fn block_import_works() { - block_import_works_inner( - new_test_ext_v0(1), - array_bytes::hex_n_into_unchecked( - "65e953676859e7a33245908af7ad3637d6861eb90416d433d485e95e2dd174a1", - ), - ); - block_import_works_inner( - new_test_ext(1), - array_bytes::hex_n_into_unchecked( - "5a19b3d6fdb7241836349fdcbe2d9df4d4f945b949d979e31ad50bff1cbcd1c2", - ), - ); - } - fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { - ext.execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root, - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { - 
Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: [0u8; 32].into(), - extrinsics_root: array_bytes::hex_n_into_unchecked( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", - ), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", - ), - extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![] }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - fn bad_extrinsic_not_inserted() { - let mut t = new_test_ext(1); - // bad nonce check! - let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - assert_err!( - Executive::apply_extrinsic(xt), - TransactionValidityError::Invalid(InvalidTransaction::Future) - ); - assert_eq!(>::extrinsic_index(), Some(0)); - }); - } - - #[test] - fn block_weight_limit_enforced() { - let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; - // on_initialize weight + base block execution weight - let block_weights = ::BlockWeights::get(); - let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; - let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - // Base block execution weight + `on_initialize` weight from the custom module. 
- assert_eq!(>::block_weight().total(), base_block_weight); - - for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { - dest: 33, - value: 0, - }), - sign_extra(1, nonce.into(), 0), - ); - let res = Executive::apply_extrinsic(xt); - if nonce != num_to_exhaust_block { - assert!(res.is_ok()); - assert_eq!( - >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, - ); - assert_eq!( - >::extrinsic_index(), - Some(nonce as u32 + 1) - ); - } else { - assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); - } - } - }); - } - - #[test] - fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let x1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), - ); - let x2 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), - ); - let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); - t.execute_with(|| { - // Block execution weight + on_initialize weight from custom module - let base_block_weight = Weight::from_parts(175, 0) + - ::BlockWeights::get().base_block; - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(>::block_weight().total(), base_block_weight); - assert_eq!(>::all_extrinsics_len(), 0); - - assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - - // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_parts(len as u64, 0) + - ::BlockWeights::get() - .get(DispatchClass::Normal) - .base_extrinsic; - assert_eq!( - >::block_weight().total(), - base_block_weight + 3u64 * extrinsic_weight, - ); - assert_eq!(>::all_extrinsics_len(), 3 * len); - - let _ = >::finalize(); - // All extrinsics length cleaned on `System::finalize` - assert_eq!(>::all_extrinsics_len(), 0); - - // New Block - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Block weight cleaned up on `System::initialize` - assert_eq!(>::block_weight().total(), base_block_weight); - }); - } - - #[test] - fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); - let mut t = new_test_ext(1); - - t.execute_with(|| { - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - valid.clone(), - Default::default(), - ), - Ok(ValidTransaction::default()), - ); - assert_eq!( - Executive::validate_transaction( - TransactionSource::InBlock, - invalid.clone(), - Default::default(), - ), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), - ); - assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); - assert_eq!( - Executive::apply_extrinsic(invalid), - Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) - ); - }); - } - - #[test] - fn can_not_pay_for_tx_fee_on_full_lock() { - let mut t = new_test_ext(1); - t.execute_with(|| { - as fungible::MutateFreeze>::set_freeze( - &(), - &1, - 110, - ) - .unwrap(); - let xt = TestXt::new( - RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), - ); - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); - assert_eq!(>::total_balance(&1), 111); - }); - } - - #[test] - fn block_hooks_weight_is_stored() { - new_test_ext(1).execute_with(|| { - Executive::initialize_block(&Header::new_from_number(1)); - Executive::finalize_block(); - // NOTE: might need updates over time if new weights are introduced. - // For now it only accounts for the base block execution weight and - // the `on_initialize` weight defined in the custom test module. 
- assert_eq!( - >::block_weight().total(), - Weight::from_parts(175 + 175 + 10, 0) - ); - }) - } - - #[test] - fn runtime_upgraded_should_work() { - new_test_ext(1).execute_with(|| { - RuntimeVersionTestValues::mutate(|v| *v = Default::default()); - // It should be added at genesis - assert!(LastRuntimeUpgrade::::exists()); - assert!(!Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - ..Default::default() - } - }); - assert!(Executive::runtime_upgraded()); - - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { - spec_version: 0, - impl_version: 2, - ..Default::default() - } - }); - assert!(!Executive::runtime_upgraded()); - - LastRuntimeUpgrade::::take(); - assert!(Executive::runtime_upgraded()); - }) - } - - #[test] - fn last_runtime_upgrade_was_upgraded_works() { - let test_data = vec![ - (0, "", 1, "", true), - (1, "", 1, "", false), - (1, "", 1, "test", true), - (1, "", 0, "", false), - (1, "", 0, "test", true), - ]; - - for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { - let current = sp_version::RuntimeVersion { - spec_version: c_spec_version, - spec_name: c_spec_name.into(), - ..Default::default() - }; - - let last = LastRuntimeUpgradeInfo { - spec_version: spec_version.into(), - spec_name: spec_name.into(), - }; - - assert_eq!(result, last.was_upgraded(¤t)); - } - } - - #[test] - fn custom_runtime_upgrade_is_called_before_modules() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - assert_eq!( - Some(RuntimeVersionTestValues::get().into()), - LastRuntimeUpgrade::::get(), - ) - }); - } - - #[test] - fn event_from_runtime_upgrade_is_included() { - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // set block number to non zero so events are not excluded - System::set_block_number(1); - - Executive::initialize_block(&Header::new( - 2, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - System::assert_last_event(frame_system::Event::::CodeUpdated.into()); - }); - } - - /// Regression test that ensures that the custom on runtime upgrade is called when executive is - /// used through the `ExecuteBlock` trait. - #[test] - fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - - let header = new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } - }); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called. - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - >>::execute_block(Block::new(header, vec![xt])); - - assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); - assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); - }); - } - - #[test] - fn all_weights_are_recorded_correctly() { - // Reset to get the correct new genesis below. - RuntimeVersionTestValues::take(); - - new_test_ext(1).execute_with(|| { - // Make sure `on_runtime_upgrade` is called for maximum complexity - RuntimeVersionTestValues::mutate(|v| { - *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } - }); - - let block_number = 1; - - Executive::initialize_block(&Header::new( - block_number, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` - // succeed. - LastRuntimeUpgrade::::take(); - - // All weights that show up in the `initialize_block_impl` - let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); - let runtime_upgrade_weight = - ::on_runtime_upgrade(); - let on_initialize_weight = - >::on_initialize(block_number); - let base_block_weight = - ::BlockWeights::get().base_block; - - // Weights are recorded correctly - assert_eq!( - frame_system::Pallet::::block_weight().total(), - custom_runtime_upgrade_weight + - runtime_upgrade_weight + - on_initialize_weight + base_block_weight, - ); - }); - } - - #[test] - fn offchain_worker_works_as_expected() { - new_test_ext(1).execute_with(|| { - let parent_hash = sp_core::H256::from([69u8; 32]); - let mut digest = Digest::default(); - digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); - - let header = - Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone()); - - Executive::offchain_worker(&header); - - assert_eq!(digest, System::digest()); - assert_eq!(parent_hash, System::block_hash(0)); - assert_eq!(header.hash(), System::block_hash(1)); - }); - } - - #[test] - fn calculating_storage_root_twice_works() { - let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. 
- Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt])); - }); - } - - #[test] - #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] - fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); - - let header = new_test_ext(1).execute_with(|| { - // Let's build some fake block. - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); - Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); - - Executive::finalize_block() - }); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new(header, vec![xt1, xt2])); - }); - } - - #[test] - #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] - fn invalid_inherents_fail_block_execution() { - let xt1 = - TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), sign_extra(1, 0, 0)); - - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block::new( - Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - ), - vec![xt1], - )); - }); - } - - // Inherents are created by the runtime and don't need to be validated. - #[test] - fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); - - new_test_ext(1).execute_with(|| { - assert_eq!( - Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) - .unwrap_err(), - InvalidTransaction::MandatoryValidation.into() - ); - }) - } -} diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs new file mode 100644 index 00000000000..70b55f6e855 --- /dev/null +++ b/substrate/frame/executive/src/tests.rs @@ -0,0 +1,1389 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test the `frame-executive` crate. + +use super::*; + +use sp_core::H256; +use sp_runtime::{ + generic::{DigestItem, Era}, + testing::{Block, Digest, Header}, + traits::{Block as BlockT, Header as HeaderT}, + transaction_validity::{ + InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, + }, + BuildStorage, DispatchError, +}; + +use frame_support::{ + assert_err, assert_ok, derive_impl, + migrations::MultiStepMigrator, + pallet_prelude::*, + parameter_types, + traits::{fungible, ConstU8, Currency, IsInherent}, + weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightMeter, WeightToFee}, +}; +use frame_system::{pallet_prelude::*, ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; +use pallet_balances::Call as BalancesCall; +use pallet_transaction_payment::CurrencyAdapter; + +const TEST_KEY: &[u8] = b":test:key:"; + +#[frame_support::pallet(dev_mode)] +mod custom { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. + // one with block number arg and one without + fn on_initialize(_: BlockNumberFor) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_idle(_: BlockNumberFor, _: Weight) -> Weight { + Weight::from_parts(175, 0) + } + + fn on_poll(_: BlockNumberFor, _: &mut WeightMeter) {} + + fn on_finalize(_: BlockNumberFor) {} + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + Weight::from_parts(200, 0) + } + + fn offchain_worker(n: BlockNumberFor) { + assert_eq!(BlockNumberFor::::from(1u32), n); + } + } + + #[pallet::call] + impl Pallet { + pub fn some_function(origin: OriginFor) -> DispatchResult { + // NOTE: does not make any difference. 
+ frame_system::ensure_signed(origin)?; + Ok(()) + } + + #[pallet::weight((200, DispatchClass::Operational))] + pub fn some_root_operation(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } + + pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { + let root = sp_io::storage::root(sp_runtime::StateVersion::V1); + sp_io::storage::set("storage_root".as_bytes(), &root); + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + *call == Call::::inherent {} + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + + // Inherent call is not validated as unsigned + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::allowed_unsigned { .. } => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } +} + +#[frame_support::pallet(dev_mode)] +mod custom2 { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. 
+ // one with block number arg and one without + fn on_initialize(_: BlockNumberFor) -> Weight { + assert!( + !MockedSystemCallbacks::pre_inherent_called(), + "Pre inherent hook goes after on_initialize" + ); + + Weight::from_parts(0, 0) + } + + fn on_idle(_: BlockNumberFor, _: Weight) -> Weight { + assert!( + MockedSystemCallbacks::post_transactions_called(), + "Post transactions hook goes before after on_idle" + ); + Weight::from_parts(0, 0) + } + + fn on_finalize(_: BlockNumberFor) { + assert!( + MockedSystemCallbacks::post_transactions_called(), + "Post transactions hook goes before after on_finalize" + ); + } + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + Weight::from_parts(0, 0) + } + } + + #[pallet::call] + impl Pallet { + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + pub fn some_call(_: OriginFor) -> DispatchResult { + assert!(MockedSystemCallbacks::post_inherent_called()); + assert!(!MockedSystemCallbacks::post_transactions_called()); + assert!(System::inherents_applied()); + + Ok(()) + } + + #[pallet::weight({0})] + pub fn optional_inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + + assert!(MockedSystemCallbacks::pre_inherent_called()); + assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called"); + assert!(!System::inherents_applied()); + + Ok(()) + } + + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn inherent(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + + assert!(MockedSystemCallbacks::pre_inherent_called()); + assert!(!MockedSystemCallbacks::post_inherent_called(), "Should not already be called"); + assert!(!System::inherents_applied()); + + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + + type Error = sp_inherents::MakeFatalError<()>; + + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1235"; + + fn create_inherent(_data: &InherentData) -> Option { + None + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::::inherent {} | Call::::optional_inherent {}) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } | + Call::optional_inherent { .. } | + Call::inherent { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + + // Inherent call is not validated as unsigned + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::allowed_unsigned { .. } => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } +} + +frame_support::construct_runtime!( + pub struct Runtime + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, + Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, + Custom2: custom2::{Pallet, Call, ValidateUnsigned, Inherent}, + } +); + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(Weight::from_parts(10, 0)) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_parts(5, 0)) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_parts(1024, u64::MAX).into()) + .build_or_panic(); + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 10, + write: 100, + }; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type BlockWeights = BlockWeights; + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Block = TestBlock; + type RuntimeEvent = RuntimeEvent; + type Version = RuntimeVersion; + type AccountData = pallet_balances::AccountData; + type PreInherents = MockedSystemCallbacks; + type PostInherents = MockedSystemCallbacks; + type PostTransactions = MockedSystemCallbacks; + type MultiBlockMigrator = MockedModeGetter; +} + +type Balance = u64; + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type AccountStore = System; +} + +parameter_types! { + pub const TransactionByteFee: Balance = 0; +} +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = (); +} + +impl custom::Config for Runtime {} +impl custom2::Config for Runtime {} + +pub struct RuntimeVersion; +impl frame_support::traits::Get for RuntimeVersion { + fn get() -> sp_version::RuntimeVersion { + RuntimeVersionTestValues::get().clone() + } +} + +parameter_types! { + pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = + Default::default(); +} + +type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +type TestXt = sp_runtime::testing::TestXt; +type TestBlock = Block; + +// Will contain `true` when the custom runtime logic was called. +const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; + +struct CustomOnRuntimeUpgrade; +impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); + sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); + System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + + Weight::from_parts(100, 0) + } +} + +type Executive = super::Executive< + Runtime, + Block, + ChainContext, + Runtime, + AllPalletsWithSystem, + CustomOnRuntimeUpgrade, +>; + +parameter_types! 
{ + pub static SystemCallbacksCalled: u32 = 0; +} + +pub struct MockedSystemCallbacks; +impl PreInherents for MockedSystemCallbacks { + fn pre_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 0); + SystemCallbacksCalled::set(1); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":pre_inherent", b"0"); + } +} + +impl PostInherents for MockedSystemCallbacks { + fn post_inherents() { + assert_eq!(SystemCallbacksCalled::get(), 1); + SystemCallbacksCalled::set(2); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_inherent", b"0"); + } +} + +impl PostTransactions for MockedSystemCallbacks { + fn post_transactions() { + assert_eq!(SystemCallbacksCalled::get(), 2); + SystemCallbacksCalled::set(3); + // Change the storage to modify the root hash: + frame_support::storage::unhashed::put(b":post_transaction", b"0"); + } +} + +impl MockedSystemCallbacks { + fn pre_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 1 + } + + fn post_inherent_called() -> bool { + SystemCallbacksCalled::get() >= 2 + } + + fn post_transactions_called() -> bool { + SystemCallbacksCalled::get() >= 3 + } + + fn reset() { + SystemCallbacksCalled::set(0); + frame_support::storage::unhashed::kill(b":pre_inherent"); + frame_support::storage::unhashed::kill(b":post_inherent"); + frame_support::storage::unhashed::kill(b":post_transaction"); + } +} + +parameter_types! { + pub static MbmActive: bool = false; +} + +pub struct MockedModeGetter; +impl MultiStepMigrator for MockedModeGetter { + fn ongoing() -> bool { + MbmActive::get() + } + + fn step() -> Weight { + Weight::zero() + } +} + +fn extra(nonce: u64, fee: Balance) -> SignedExtra { + ( + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(fee), + ) +} + +fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { + Some((who, extra(nonce, fee))) +} + +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }) +} + +#[test] +fn balance_transfer_dispatch_works() { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } + .assimilate_storage(&mut t) + .unwrap(); + let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let fee: Balance = + ::WeightToFee::weight_to_fee(&weight); + let mut t = sp_io::TestExternalities::new(t); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + let r = Executive::apply_extrinsic(xt); + assert!(r.is_ok()); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); + }); +} + +fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + SystemCallbacksCalled::set(0); + }); + ext +} + +fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + 
pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + (t, sp_runtime::StateVersion::V0).into() +} + +#[test] +fn block_import_works() { + block_import_works_inner( + new_test_ext_v0(1), + array_bytes::hex_n_into_unchecked( + "4826d3bdf87dbbc883d2ab274cbe272f58ed94a904619b59953e48294d1142d2", + ), + ); + block_import_works_inner( + new_test_ext(1), + array_bytes::hex_n_into_unchecked( + "d6b465f5a50c9f8d5a6edc0f01d285a6b19030f097d3aaf1649b7be81649f118", + ), + ); +} +fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { + ext.execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root, + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_state_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +#[should_panic] +fn block_import_of_bad_extrinsic_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: array_bytes::hex_n_into_unchecked( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + ), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); +} + +#[test] +fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(1); + // bad nonce check! + let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + assert_err!( + Executive::apply_extrinsic(xt), + TransactionValidityError::Invalid(InvalidTransaction::Future) + ); + assert_eq!(>::extrinsic_index(), Some(0)); + }); +} + +#[test] +fn block_weight_limit_enforced() { + let mut t = new_test_ext(10000); + // given: TestXt uses the encoded len as fixed Len: + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let encoded = xt.encode(); + let encoded_len = encoded.len() as u64; + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; + let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + t.execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + // Base block execution weight + `on_initialize` weight from the custom module. 
+ assert_eq!(>::block_weight().total(), base_block_weight); + + for nonce in 0..=num_to_exhaust_block { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, nonce.into(), 0), + ); + let res = Executive::apply_extrinsic(xt); + if nonce != num_to_exhaust_block { + assert!(res.is_ok()); + assert_eq!( + >::block_weight().total(), + //--------------------- on_initialize + block_execution + extrinsic_base weight + Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + ); + assert_eq!( + >::extrinsic_index(), + Some(nonce as u32 + 1) + ); + } else { + assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); + } + } + }); +} + +#[test] +fn block_weight_and_size_is_stored_per_tx() { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let x1 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 1, 0), + ); + let x2 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 2, 0), + ); + let len = xt.clone().encode().len() as u32; + let mut t = new_test_ext(1); + t.execute_with(|| { + // Block execution weight + on_initialize weight from custom module + let base_block_weight = Weight::from_parts(175, 0) + + ::BlockWeights::get().base_block; + + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); + + assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); + + // default weight for `TestXt` == encoded length. + let extrinsic_weight = Weight::from_parts(len as u64, 0) + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + assert_eq!( + >::block_weight().total(), + base_block_weight + 3u64 * extrinsic_weight, + ); + assert_eq!(>::all_extrinsics_len(), 3 * len); + + let _ = >::finalize(); + // All extrinsics length cleaned on `System::finalize` + assert_eq!(>::all_extrinsics_len(), 0); + + // Reset to a new block. + SystemCallbacksCalled::take(); + Executive::initialize_block(&Header::new_from_number(2)); + + // Block weight cleaned up on `System::initialize` + assert_eq!(>::block_weight().total(), base_block_weight); + }); +} + +#[test] +fn validate_unsigned() { + let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); + let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let mut t = new_test_ext(1); + + t.execute_with(|| { + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + valid.clone(), + Default::default(), + ), + Ok(ValidTransaction::default()), + ); + assert_eq!( + Executive::validate_transaction( + TransactionSource::InBlock, + invalid.clone(), + Default::default(), + ), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), + ); + // Need to initialize the block before applying extrinsics for the `MockedSystemCallbacks` + // check. 
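+		// (the mocked callbacks assert a strict call order, so the block must be initialized first)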
+ Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); + assert_eq!( + Executive::apply_extrinsic(invalid), + Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)) + ); + }); +} + +#[test] +fn can_not_pay_for_tx_fee_on_full_lock() { + let mut t = new_test_ext(1); + t.execute_with(|| { + as fungible::MutateFreeze>::set_freeze(&(), &1, 110) + .unwrap(); + let xt = TestXt::new( + RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), + sign_extra(1, 0, 0), + ); + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()),); + assert_eq!(>::total_balance(&1), 111); + }); +} + +#[test] +fn block_hooks_weight_is_stored() { + new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block(); + // NOTE: might need updates over time if new weights are introduced. + // For now it only accounts for the base block execution weight and + // the `on_initialize` weight defined in the custom test module. + assert_eq!( + >::block_weight().total(), + Weight::from_parts(175 + 175 + 10, 0) + ); + }) +} + +#[test] +fn runtime_upgraded_should_work() { + new_test_ext(1).execute_with(|| { + RuntimeVersionTestValues::mutate(|v| *v = Default::default()); + // It should be added at genesis + assert!(LastRuntimeUpgrade::::exists()); + assert!(!Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + assert!(Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + ..Default::default() + } + }); + assert!(Executive::runtime_upgraded()); + + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { + spec_version: 0, + impl_version: 2, + ..Default::default() + } + }); + assert!(!Executive::runtime_upgraded()); + + LastRuntimeUpgrade::::take(); + assert!(Executive::runtime_upgraded()); + }) +} + +#[test] +fn last_runtime_upgrade_was_upgraded_works() { + let test_data = vec![ + (0, "", 1, "", true), + (1, "", 1, "", false), + (1, "", 1, "test", true), + (1, "", 0, "", false), + (1, "", 0, "test", true), + ]; + + for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { + let current = sp_version::RuntimeVersion { + spec_version: c_spec_version, + spec_name: c_spec_name.into(), + ..Default::default() + }; + + let last = LastRuntimeUpgradeInfo { + spec_version: spec_version.into(), + spec_name: spec_name.into(), + }; + + assert_eq!(result, last.was_upgraded(¤t)); + } +} + +#[test] +fn custom_runtime_upgrade_is_called_before_modules() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + Executive::initialize_block(&Header::new_from_number(1)); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) + }); +} + +#[test] +fn event_from_runtime_upgrade_is_included() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. 
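+		// (bumping `spec_version` makes the runtime count as upgraded)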
+ RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + // set block number to non zero so events are not excluded + System::set_block_number(1); + + Executive::initialize_block(&Header::new_from_number(2)); + System::assert_last_event(frame_system::Event::::CodeUpdated.into()); + }); +} + +/// Regression test that ensures that the custom on runtime upgrade is called when executive is +/// used through the `ExecuteBlock` trait. +#[test] +fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { + let xt = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + + let header = new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } + }); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + >>::execute_block(Block::new(header, vec![xt])); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + }); +} + +#[test] +fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::take(); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called for maximum complexity + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + let block_number = 1; + + Executive::initialize_block(&Header::new_from_number(block_number)); + + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. 
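+		// (otherwise the second run would see the version as already recorded and skip the upgrade)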
+ LastRuntimeUpgrade::::take(); + MockedSystemCallbacks::reset(); + + // All weights that show up in the `initialize_block_impl` + let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); + let runtime_upgrade_weight = + ::on_runtime_upgrade(); + let on_initialize_weight = + >::on_initialize(block_number); + let base_block_weight = ::BlockWeights::get().base_block; + + // Weights are recorded correctly + assert_eq!( + frame_system::Pallet::::block_weight().total(), + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + on_initialize_weight + + base_block_weight, + ); + }); +} + +#[test] +fn offchain_worker_works_as_expected() { + new_test_ext(1).execute_with(|| { + let parent_hash = sp_core::H256::from([69u8; 32]); + let mut digest = Digest::default(); + digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); + + let header = Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone()); + + Executive::offchain_worker(&header); + + assert_eq!(digest, System::digest()); + assert_eq!(parent_hash, System::block_hash(0)); + assert_eq!(header.hash(), System::block_hash(1)); + }); +} + +#[test] +fn calculating_storage_root_twice_works() { + let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); + let xt = TestXt::new(call, sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt])); + }); +} + +#[test] +#[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] +fn invalid_inherent_position_fail() { + let xt1 = TestXt::new( + RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +fn valid_inherents_position_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] +fn invalid_inherents_fail_block_execution() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), sign_extra(1, 0, 0)); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new( + Header::new(1, H256::default(), H256::default(), [69u8; 32].into(), Digest::default()), + vec![xt1], + )); + }); +} + +// Inherents are created by the runtime and don't need to be validated. +#[test] +fn inherents_fail_validate_block() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + new_test_ext(1).execute_with(|| { + assert_eq!( + Executive::validate_transaction(TransactionSource::External, xt1, H256::random()) + .unwrap_err(), + InvalidTransaction::MandatoryValidation.into() + ); + }) +} + +/// Inherents still work while `initialize_block` forbids transactions. +#[test] +fn inherents_ok_while_exts_forbidden_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + // This is not applied: + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1])); + }); +} + +/// Refuses to import blocks with transactions during `OnlyInherents` mode. +#[test] +#[should_panic = "Only inherents are allowed in this block"] +fn transactions_in_only_inherents_block_errors() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +/// Same as above but no error. 
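+/// With no MBMs active, the transactions in the block are accepted.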
+#[test] +fn transactions_in_normal_block_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + // Tell `initialize_block` to forbid extrinsics: + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +fn try_execute_block_works() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Same as `extrinsic_while_exts_forbidden_errors` but using the try-runtime function. +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic = "Only inherents allowed"] +fn try_execute_tx_forbidden_errors() { + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + MbmActive::set(true); + Executive::try_execute_block( + Block::new(header, vec![xt1, xt2]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + }); +} + +/// Check that `ensure_inherents_are_first` reports the correct indices. 
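+/// `Ok(n)` is the number of leading inherents; `Err(i)` is the index of the first inherent that
+/// appears after a non-inherent extrinsic.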
+#[test] +fn ensure_inherents_are_first_works() { + let in1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let in2 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + // Mocked empty header: + let header = new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + assert_ok!(Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![]),), 0); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![xt2.clone()]),), + 0 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![in1.clone()])), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in1.clone(), xt2.clone()] + ),), + 1 + ); + assert_ok!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![in2.clone(), in1.clone(), xt2.clone()] + ),), + 2 + ); + + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), in1.clone()] + ),), + Err(1) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), in1.clone()] + ),), + Err(2) + ); + assert_eq!( + Runtime::ensure_inherents_are_first(&Block::new( + header.clone(), + vec![xt2.clone(), xt2.clone(), xt2.clone(), in2.clone()] + ),), + Err(3) + ); + }); +} + +/// Check that block execution rejects blocks with transactions in them while MBMs are active and +/// also that all the system callbacks are called correctly. +#[test] +fn callbacks_in_block_execution_works() { + callbacks_in_block_execution_works_inner(false); + callbacks_in_block_execution_works_inner(true); +} + +fn callbacks_in_block_execution_works_inner(mbms_active: bool) { + MbmActive::set(mbms_active); + + for (n_in, n_tx) in (0..10usize).zip(0..10usize) { + let mut extrinsics = Vec::new(); + + let header = new_test_ext(10).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::initialize_block(&Header::new_from_number(1)); + assert_eq!(SystemCallbacksCalled::get(), 1); + + for i in 0..n_in { + let xt = if i % 2 == 0 { + TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None) + } else { + TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None) + }; + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + for t in 0..n_tx { + let xt = TestXt::new( + RuntimeCall::Custom2(custom2::Call::some_call {}), + sign_extra(1, t as u64, 0), + ); + // Extrinsics can be applied even when MBMs are active. Only the `execute_block` + // will reject it. 
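+				// (a block author is expected to simply not include transactions while MBMs are ongoing)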
+ Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + extrinsics.push(xt); + } + + Executive::finalize_block() + }); + assert_eq!(SystemCallbacksCalled::get(), 3); + + new_test_ext(10).execute_with(|| { + let header = std::panic::catch_unwind(|| { + Executive::execute_block(Block::new(header, extrinsics)); + }); + + match header { + Err(e) => { + let err = e.downcast::<&str>().unwrap(); + assert_eq!(*err, "Only inherents are allowed in this block"); + assert!( + MbmActive::get() && n_tx > 0, + "Transactions should be rejected when MBMs are active" + ); + }, + Ok(_) => { + assert_eq!(SystemCallbacksCalled::get(), 3); + assert!( + !MbmActive::get() || n_tx == 0, + "MBMs should be deactivated after finalization" + ); + }, + } + }); + } +} + +#[test] +fn post_inherent_called_after_all_inherents() { + let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +/// Regression test for AppSec finding #40. +#[test] +fn post_inherent_called_after_all_optional_inherents() { + let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
+ Executive::initialize_block(&Header::new_from_number(1)); + + Executive::apply_extrinsic(in1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + #[cfg(feature = "try-runtime")] + new_test_ext(1).execute_with(|| { + Executive::try_execute_block( + Block::new(header.clone(), vec![in1.clone(), xt1.clone()]), + true, + true, + frame_try_runtime::TryStateSelect::All, + ) + .unwrap(); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); + + new_test_ext(1).execute_with(|| { + MockedSystemCallbacks::reset(); + Executive::execute_block(Block::new(header, vec![in1, xt1])); + assert!(MockedSystemCallbacks::post_transactions_called()); + }); +} + +#[test] +fn is_inherent_works() { + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + assert!(Runtime::is_inherent(&ext)); + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + assert!(Runtime::is_inherent(&ext)); + + let ext = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + assert!(!Runtime::is_inherent(&ext)); + + let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {}), None); + assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); +} diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml new file mode 100644 index 00000000000..2914aa9ea97 --- /dev/null +++ b/substrate/frame/migrations/Cargo.toml @@ -0,0 +1,64 @@ +[package] +name = "pallet-migrations" +version = "1.0.0" +description = "FRAME pallet to execute multi-block migrations." +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +docify = "0.1.14" +impl-trait-for-tuples = "0.2.2" +log = "0.4.18" +scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } +frame-support = { default-features = false, path = "../support" } +frame-system = { default-features = false, path = "../system" } +sp-core = { path = "../../primitives/core", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } + +[dev-dependencies] +frame-executive = { path = "../executive" } +sp-api = { path = "../../primitives/api", features = ["std"] } +sp-block-builder = { path = "../../primitives/block-builder", features = ["std"] } +sp-io = { path = "../../primitives/io", features = ["std"] } +sp-tracing = { path = "../../primitives/tracing", features = ["std"] } +sp-version = { path = "../../primitives/version", features = ["std"] } + +pretty_assertions = "1.3.0" + +[features] +default = ["std"] + +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + 
"sp-runtime/try-runtime", +] diff --git a/substrate/frame/migrations/src/benchmarking.rs b/substrate/frame/migrations/src/benchmarking.rs new file mode 100644 index 00000000000..8ad1fa50d14 --- /dev/null +++ b/substrate/frame/migrations/src/benchmarking.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::One; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +#[benchmarks] +mod benches { + use super::*; + use frame_support::traits::Hooks; + + #[benchmark] + fn onboard_new_mbms() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::onboard_new_mbms(); + } + + assert_last_event::(Event::UpgradeStarted { migrations: 1 }.into()); + } + + #[benchmark] + fn progress_mbms_none() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert!(!Cursor::::exists()); + + #[block] + { + Pallet::::progress_mbms(One::one()); + } + } + + /// All migrations completed. + #[benchmark] + fn exec_migration_completed() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 1, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeCompleted {}.into()); + + Ok(()) + } + + /// No migration runs since it is skipped as historic. + #[benchmark] + fn exec_migration_skipped_historic() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); // Should not be called anyway. + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + + let id: IdentifierOf = T::Migrations::nth_id(0).unwrap().try_into().unwrap(); + Historic::::insert(id, ()); + + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationSkipped { index: 0 }.into()); + + Ok(()) + } + + /// Advance a migration by one step. 
+ #[benchmark] + fn exec_migration_advance() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(1); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationAdvanced { index: 0, took: One::one() }.into()); + + Ok(()) + } + + /// Successfully complete a migration. + #[benchmark] + fn exec_migration_complete() -> Result<(), BenchmarkError> { + T::Migrations::set_success_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::MigrationCompleted { index: 0, took: One::one() }.into()); + + Ok(()) + } + + #[benchmark] + fn exec_migration_fail() -> Result<(), BenchmarkError> { + T::Migrations::set_fail_after(0); + assert_eq!(T::Migrations::len(), 1, "Setup failed"); + let c = ActiveCursor { index: 0, inner_cursor: None, started_at: 0u32.into() }; + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + System::::set_block_number(1u32.into()); + + #[block] + { + Pallet::::exec_migration(c, false, &mut meter); + } + + assert_last_event::(Event::UpgradeFailed {}.into()); + + Ok(()) + } + + #[benchmark] + fn on_init_loop() { + T::Migrations::set_fail_after(0); // Should not be called anyway. + System::::set_block_number(1u32.into()); + Pallet::::on_runtime_upgrade(); + + #[block] + { + Pallet::::on_initialize(1u32.into()); + } + } + + #[benchmark] + fn force_set_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, Some(cursor::())); + } + + #[benchmark] + fn force_set_active_cursor() { + #[extrinsic_call] + _(RawOrigin::Root, 0, None, None); + } + + #[benchmark] + fn force_onboard_mbms() { + #[extrinsic_call] + _(RawOrigin::Root); + } + + #[benchmark] + fn clear_historic(n: Linear<0, { DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 }>) { + let id_max_len = ::IdentifierMaxLen::get(); + assert!(id_max_len >= 4, "Precondition violated"); + + for i in 0..DEFAULT_HISTORIC_BATCH_CLEAR_SIZE * 2 { + let id = IdentifierOf::::truncate_from( + i.encode().into_iter().cycle().take(id_max_len as usize).collect::>(), + ); + + Historic::::insert(&id, ()); + } + + #[extrinsic_call] + _( + RawOrigin::Root, + HistoricCleanupSelector::Wildcard { limit: n.into(), previous_cursor: None }, + ); + } + + fn cursor() -> CursorOf { + // Note: The weight of a function can depend on the weight of reading the `inner_cursor`. + // `Cursor` is a user provided type. Now instead of requiring something like `Cursor: + // From`, we instead rely on the fact that it is MEL and the PoV benchmarking will + // therefore already take the MEL bound, even when the cursor in storage is `None`. + MigrationCursor::Active(ActiveCursor { + index: u32::MAX, + inner_cursor: None, + started_at: 0u32.into(), + }) + } + + // Implements a test for each benchmark. Execute with: + // `cargo test -p pallet-migrations --features runtime-benchmarks`. 
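+	// (`crate::mock::Test` supplies the mocked migrations that these benchmarks rely on)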
+ impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs new file mode 100644 index 00000000000..cd57d89f440 --- /dev/null +++ b/substrate/frame/migrations/src/lib.rs @@ -0,0 +1,746 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![deny(missing_docs)] +#![deny(rustdoc::broken_intra_doc_links)] + +//! # `pallet-migrations` +//! +//! Provides multi block migrations for FRAME runtimes. +//! +//! ## Overview +//! +//! The pallet takes care of executing a batch of multi-step migrations over multiple blocks. The +//! process starts on each runtime upgrade. Normal and operational transactions are paused while +//! migrations are on-going. +//! +//! ### Example +//! +//! This example demonstrates a simple mocked walk through of a basic success scenario. The pallet +//! is configured with two migrations: one succeeding after just one step, and the second one +//! succeeding after two steps. A runtime upgrade is then enacted and the block number is advanced +//! until all migrations finish executing. Afterwards, the recorded historic migrations are +//! checked and events are asserted. +#![doc = docify::embed!("substrate/frame/migrations/src/tests.rs", simple_works)] +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events and errors. +//! +//! Otherwise noteworthy API of this pallet include its implementation of the +//! [`MultiStepMigrator`] trait. This must be plugged into +//! [`frame_system::Config::MultiBlockMigrator`] for proper function. +//! +//! The API contains some calls for emergency management. They are all prefixed with `force_` and +//! should normally not be needed. Pay special attention prior to using them. +//! +//! ### Design Goals +//! +//! 1. Must automatically execute migrations over multiple blocks. +//! 2. Must expose information about whether migrations are ongoing. +//! 3. Must respect pessimistic weight bounds of migrations. +//! 4. Must execute migrations in order. Skipping is not allowed; migrations are run on a +//! all-or-nothing basis. +//! 5. Must prevent re-execution of past migrations. +//! 6. Must provide transactional storage semantics for migrations. +//! 7. Must guarantee progress. +//! +//! ### Design +//! +//! Migrations are provided to the pallet through the associated type [`Config::Migrations`] of type +//! [`SteppedMigrations`]. This allows multiple migrations to be aggregated through a tuple. It +//! simplifies the trait bounds since all associated types of the trait must be provided by the +//! pallet. The actual progress of the pallet is stored in the [`Cursor`] storage item. This can +//! 
either be [`MigrationCursor::Active`] or [`MigrationCursor::Stuck`]. In the active case it
+//! points to the currently active migration and stores its inner cursor. The inner cursor can then
+//! be used by the migration to store its inner state and advance. Each time the migration
+//! returns `Some(cursor)`, it signals the pallet that it is not done yet.
+//! The cursor is reset on each runtime upgrade. This ensures that it starts to execute at the
+//! first migration in the vector. The pallet's cursor is only ever incremented or set to `Stuck`
+//! once it encounters an error (Goal 4). Once in the stuck state, the pallet will stay stuck until
+//! it is fixed through manual governance intervention.
+//! As soon as the cursor of the pallet becomes `Some(_)`, [`MultiStepMigrator::ongoing`] returns
+//! `true` (Goal 2). This can be used by upstream code to possibly pause transactions.
+//! In `on_initialize` the pallet will load the current migration and check whether it was already
+//! executed in the past by checking for membership of its ID in the [`Historic`] set. Historic
+//! migrations are skipped without causing an error. Each successfully executed migration is added
+//! to this set (Goal 5).
+//! This proceeds until no more migrations remain. At that point, the event `UpgradeCompleted` is
+//! emitted (Goal 1).
+//! The execution of each migration happens by calling [`SteppedMigration::transactional_step`].
+//! This function wraps the inner `step` function into a transactional layer to allow rollback in
+//! the error case (Goal 6).
+//! Weight limits must be checked by the migration itself. The pallet provides a [`WeightMeter`] for
+//! that purpose. The pallet may return [`SteppedMigrationError::InsufficientWeight`] at any point.
+//! In that scenario, one of two things will happen: if that migration was exclusively executed
+//! in this block, and therefore required more than the maximum amount of weight possible, the
+//! process becomes `Stuck`. Otherwise, one re-attempt is executed with the same logic in the next
+//! block (Goal 3). Progress through the migrations is guaranteed by providing a timeout for each
+//! migration via [`SteppedMigration::max_steps`]. The pallet **ONLY** guarantees progress if this
+//! is set to sensible limits (Goal 7).
+//!
+//! ### Scenario: Governance cleanup
+//!
+//! Every now and then, governance can make use of the [`clear_historic`][Pallet::clear_historic]
+//! call. This ensures that no old migrations pile up in the [`Historic`] set. This can be done very
+//! rarely, since the storage should not grow quickly and the lookup weight does not suffer much.
+//! Another possibility would be to have a synchronous single-block migration perpetually deployed
+//! that cleans them up before the MBMs start.
+//!
+//! ### Scenario: Successful upgrade
+//!
+//! The standard procedure for a successful runtime upgrade can look like this:
+//! 1. Migrations are configured in the `Migrations` config item. All migrations expose
+//! [`max_steps`][SteppedMigration::max_steps], are error tolerant, check their weight bounds and
+//! have a unique identifier.
+//! 2. The runtime upgrade is enacted. An `UpgradeStarted` event is
+//! followed by lots of `MigrationAdvanced` and `MigrationCompleted` events. Finally
+//! `UpgradeCompleted` is emitted.
+//! 3. Cleanup as described in the governance scenario can be executed at any time after the
+//! migrations have completed.
+//!
+//! ### Advice: Failed upgrades
+//!
+//! 
Failed upgrades cannot be recovered from automatically and require governance intervention. Set +//! up monitoring for `UpgradeFailed` events to be made aware of any failures. The hook +//! [`FailedMigrationHandler::failed`] should be setup in a way that it allows governance to act, +//! but still prevent other transactions from interacting with the inconsistent storage state. Note +//! that this is paramount, since the inconsistent state might contain a faulty balance amount or +//! similar that could cause great harm if user transactions don't remain suspended. One way to +//! implement this would be to use the `SafeMode` or `TxPause` pallets that can prevent most user +//! interactions but still allow a whitelisted set of governance calls. +//! +//! ### Remark: Failed migrations +//! +//! Failed migrations are not added to the `Historic` set. This means that an erroneous +//! migration must be removed and fixed manually. This already applies, even before considering the +//! historic set. +//! +//! ### Remark: Transactional processing +//! +//! You can see the transactional semantics for migration steps as mostly useless, since in the +//! stuck case the state is already messed up. This just prevents it from becoming even more messed +//! up, but doesn't prevent it in the first place. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod mock; +pub mod mock_helpers; +mod tests; +pub mod weights; + +pub use pallet::*; +pub use weights::WeightInfo; + +use codec::{Decode, Encode, MaxEncodedLen}; +use core::ops::ControlFlow; +use frame_support::{ + defensive, defensive_assert, + migrations::*, + traits::Get, + weights::{Weight, WeightMeter}, + BoundedVec, +}; +use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System}; +use sp_runtime::Saturating; +use sp_std::vec::Vec; + +/// Points to the next migration to execute. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +pub enum MigrationCursor { + /// Points to the currently active migration and its inner cursor. + Active(ActiveCursor), + + /// Migration got stuck and cannot proceed. This is bad. + Stuck, +} + +impl MigrationCursor { + /// Try to return self as an [`ActiveCursor`]. + pub fn as_active(&self) -> Option<&ActiveCursor> { + match self { + MigrationCursor::Active(active) => Some(active), + MigrationCursor::Stuck => None, + } + } +} + +impl From> + for MigrationCursor +{ + fn from(active: ActiveCursor) -> Self { + MigrationCursor::Active(active) + } +} + +/// Points to the currently active migration and its inner cursor. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen)] +pub struct ActiveCursor { + /// The index of the migration in the MBM tuple. + pub index: u32, + /// The cursor of the migration that is referenced by `index`. + pub inner_cursor: Option, + /// The block number that the migration started at. + /// + /// This is used to calculate how many blocks it took. + pub started_at: BlockNumber, +} + +impl ActiveCursor { + /// Advance the overarching cursor to the next migration. + pub(crate) fn goto_next_migration(&mut self, current_block: BlockNumber) { + self.index.saturating_inc(); + self.inner_cursor = None; + self.started_at = current_block; + } +} + +/// How to clear the records of historic migrations. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo)] +pub enum HistoricCleanupSelector { + /// Clear exactly these entries. + /// + /// This is the advised way of doing it. 
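+	/// The identifiers are the ones reported by the configured [`SteppedMigrations`].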
+ Specific(Vec), + + /// Clear up to this many entries + Wildcard { + /// How many should be cleared in this call at most. + limit: Option, + /// The cursor that was emitted from any previous `HistoricCleared`. + /// + /// Does not need to be passed when clearing the first batch. + previous_cursor: Option>, + }, +} + +/// The default number of entries that should be cleared by a `HistoricCleanupSelector::Wildcard`. +/// +/// The caller can explicitly specify a higher amount. Benchmarks are run with twice this value. +const DEFAULT_HISTORIC_BATCH_CLEAR_SIZE: u32 = 128; + +impl HistoricCleanupSelector { + /// The maximal number of entries that this will remove. + /// + /// Needed for weight calculation. + pub fn limit(&self) -> u32 { + match self { + Self::Specific(ids) => ids.len() as u32, + Self::Wildcard { limit, .. } => limit.unwrap_or(DEFAULT_HISTORIC_BATCH_CLEAR_SIZE), + } + } +} + +/// Convenience alias for [`MigrationCursor`]. +pub type CursorOf = MigrationCursor, BlockNumberFor>; + +/// Convenience alias for the raw inner cursor of a migration. +pub type RawCursorOf = BoundedVec::CursorMaxLen>; + +/// Convenience alias for the identifier of a migration. +pub type IdentifierOf = BoundedVec::IdentifierMaxLen>; + +/// Convenience alias for [`ActiveCursor`]. +pub type ActiveCursorOf = ActiveCursor, BlockNumberFor>; + +/// Trait for a tuple of No-OP migrations with one element. +pub trait MockedMigrations: SteppedMigrations { + /// The migration should fail after `n` steps. + fn set_fail_after(n: u32); + /// The migration should succeed after `n` steps. + fn set_success_after(n: u32); +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type of the runtime. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// All the multi-block migrations to run. + /// + /// Should only be updated in a runtime-upgrade once all the old migrations have completed. + /// (Check that [`Cursor`] is `None`). + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations: SteppedMigrations; + + /// Mocked migrations for benchmarking only. + /// + /// Should be configured to [`crate::mock_helpers::MockedMigrations`] in benchmarks. + #[cfg(feature = "runtime-benchmarks")] + type Migrations: MockedMigrations; + + /// The maximal length of an encoded cursor. + /// + /// A good default needs to selected such that no migration will ever have a cursor with MEL + /// above this limit. This is statically checked in `integrity_test`. + #[pallet::constant] + type CursorMaxLen: Get; + + /// The maximal length of an encoded identifier. + /// + /// A good default needs to selected such that no migration will ever have an identifier + /// with MEL above this limit. This is statically checked in `integrity_test`. + #[pallet::constant] + type IdentifierMaxLen: Get; + + /// Notifications for status updates of a runtime upgrade. + /// + /// Could be used to pause XCM etc. + type MigrationStatusHandler: MigrationStatusHandler; + + /// Handler for failed migrations. + type FailedMigrationHandler: FailedMigrationHandler; + + /// The maximum weight to spend each block to execute migrations. + type MaxServiceWeight: Get; + + /// Weight information for the calls and functions of this pallet. + type WeightInfo: WeightInfo; + } + + /// The currently active migration to run and its cursor. 
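+	///
+	/// Stored as a [`CursorOf`]; that is, either an [`ActiveCursor`] pointing into
+	/// [`Config::Migrations`] or the [`MigrationCursor::Stuck`] marker.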
+ /// + /// `None` indicates that no migration is running. + #[pallet::storage] + pub type Cursor = StorageValue<_, CursorOf, OptionQuery>; + + /// Set of all successfully executed migrations. + /// + /// This is used as blacklist, to not re-execute migrations that have not been removed from the + /// codebase yet. Governance can regularly clear this out via `clear_historic`. + #[pallet::storage] + pub type Historic = StorageMap<_, Twox64Concat, IdentifierOf, (), OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A Runtime upgrade started. + /// + /// Its end is indicated by `UpgradeCompleted` or `UpgradeFailed`. + UpgradeStarted { + /// The number of migrations that this upgrade contains. + /// + /// This can be used to design a progress indicator in combination with counting the + /// `MigrationCompleted` and `MigrationSkipped` events. + migrations: u32, + }, + /// The current runtime upgrade completed. + /// + /// This implies that all of its migrations completed successfully as well. + UpgradeCompleted, + /// Runtime upgrade failed. + /// + /// This is very bad and will require governance intervention. + UpgradeFailed, + /// A migration was skipped since it was already executed in the past. + MigrationSkipped { + /// The index of the skipped migration within the [`Config::Migrations`] list. + index: u32, + }, + /// A migration progressed. + MigrationAdvanced { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// A Migration completed. + MigrationCompleted { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// A Migration failed. + /// + /// This implies that the whole upgrade failed and governance intervention is required. + MigrationFailed { + /// The index of the migration within the [`Config::Migrations`] list. + index: u32, + /// The number of blocks that this migration took so far. + took: BlockNumberFor, + }, + /// The set of historical migrations has been cleared. + HistoricCleared { + /// Should be passed to `clear_historic` in a successive call. + next_cursor: Option>, + }, + } + + #[pallet::error] + pub enum Error { + /// The operation cannot complete since some MBMs are ongoing. + Ongoing, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + Self::onboard_new_mbms() + } + + #[cfg(feature = "std")] + fn integrity_test() { + // Check that the migrations tuple is legit. + frame_support::assert_ok!(T::Migrations::integrity_test()); + + // Very important! Ensure that the pallet is configured in `System::Config`. + { + assert!(!Cursor::::exists(), "Externalities storage should be clean"); + assert!(!::MultiBlockMigrator::ongoing()); + + Cursor::::put(MigrationCursor::Stuck); + assert!(::MultiBlockMigrator::ongoing()); + + Cursor::::kill(); + } + + // The per-block service weight is sane. 
+ #[cfg(not(test))] + { + let want = T::MaxServiceWeight::get(); + let max = ::BlockWeights::get().max_block; + + assert!(want.all_lte(max), "Service weight is larger than a block: {want} > {max}",); + } + + // Cursor MEL + { + let mel = T::Migrations::cursor_max_encoded_len(); + let max_mel = T::CursorMaxLen::get() as usize; + assert!( + mel <= max_mel, + "A Cursor is not guaranteed to fit into the storage: {mel} > {max_mel}", + ); + } + + // Identifier MEL + { + let mel = T::Migrations::identifier_max_encoded_len(); + let max_mel = T::IdentifierMaxLen::get() as usize; + assert!( + mel <= max_mel, + "An Identifier is not guaranteed to fit into the storage: {mel} > {max_mel}", + ); + } + } + } + + #[pallet::call(weight = T::WeightInfo)] + impl Pallet { + /// Allows root to set a cursor to forcefully start, stop or forward the migration process. + /// + /// Should normally not be needed and is only in place as emergency measure. Note that + /// restarting the migration process in this manner will not call the + /// [`MigrationStatusHandler::started`] hook or emit an `UpgradeStarted` event. + #[pallet::call_index(0)] + pub fn force_set_cursor( + origin: OriginFor, + cursor: Option>, + ) -> DispatchResult { + ensure_root(origin)?; + + Cursor::::set(cursor); + + Ok(()) + } + + /// Allows root to set an active cursor to forcefully start/forward the migration process. + /// + /// This is an edge-case version of [`Self::force_set_cursor`] that allows to set the + /// `started_at` value to the next block number. Otherwise this would not be possible, since + /// `force_set_cursor` takes an absolute block number. Setting `started_at` to `None` + /// indicates that the current block number plus one should be used. + #[pallet::call_index(1)] + pub fn force_set_active_cursor( + origin: OriginFor, + index: u32, + inner_cursor: Option>, + started_at: Option>, + ) -> DispatchResult { + ensure_root(origin)?; + + let started_at = started_at.unwrap_or( + System::::block_number().saturating_add(sp_runtime::traits::One::one()), + ); + Cursor::::put(MigrationCursor::Active(ActiveCursor { + index, + inner_cursor, + started_at, + })); + + Ok(()) + } + + /// Forces the onboarding of the migrations. + /// + /// This process happens automatically on a runtime upgrade. It is in place as an emergency + /// measurement. The cursor needs to be `None` for this to succeed. + #[pallet::call_index(2)] + pub fn force_onboard_mbms(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + + ensure!(!Cursor::::exists(), Error::::Ongoing); + Self::onboard_new_mbms(); + + Ok(()) + } + + /// Clears the `Historic` set. + /// + /// `map_cursor` must be set to the last value that was returned by the + /// `HistoricCleared` event. The first time `None` can be used. `limit` must be chosen in a + /// way that will result in a sensible weight. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::clear_historic(selector.limit()))] + pub fn clear_historic( + origin: OriginFor, + selector: HistoricCleanupSelector>, + ) -> DispatchResult { + ensure_root(origin)?; + + match &selector { + HistoricCleanupSelector::Specific(ids) => { + for id in ids { + Historic::::remove(id); + } + Self::deposit_event(Event::HistoricCleared { next_cursor: None }); + }, + HistoricCleanupSelector::Wildcard { previous_cursor, .. 
} => { + let next = Historic::::clear(selector.limit(), previous_cursor.as_deref()); + Self::deposit_event(Event::HistoricCleared { next_cursor: next.maybe_cursor }); + }, + } + + Ok(()) + } + } +} + +impl Pallet { + /// Onboard all new Multi-Block-Migrations and start the process of executing them. + /// + /// Should only be called once all previous migrations completed. + fn onboard_new_mbms() -> Weight { + if let Some(cursor) = Cursor::::get() { + log::error!("Ongoing migrations interrupted - chain stuck"); + + let maybe_index = cursor.as_active().map(|c| c.index); + Self::upgrade_failed(maybe_index); + return T::WeightInfo::onboard_new_mbms() + } + + let migrations = T::Migrations::len(); + log::debug!("Onboarding {migrations} new MBM migrations"); + + if migrations > 0 { + // Set the cursor to the first migration: + Cursor::::set(Some( + ActiveCursor { + index: 0, + inner_cursor: None, + started_at: System::::block_number(), + } + .into(), + )); + Self::deposit_event(Event::UpgradeStarted { migrations }); + T::MigrationStatusHandler::started(); + } + + T::WeightInfo::onboard_new_mbms() + } + + /// Tries to make progress on the Multi-Block-Migrations process. + fn progress_mbms(n: BlockNumberFor) -> Weight { + let mut meter = WeightMeter::with_limit(T::MaxServiceWeight::get()); + meter.consume(T::WeightInfo::progress_mbms_none()); + + let mut cursor = match Cursor::::get() { + None => { + log::trace!("[Block {n:?}] Waiting for cursor to become `Some`."); + return meter.consumed() + }, + Some(MigrationCursor::Active(cursor)) => { + log::debug!("Progressing MBM #{}", cursor.index); + cursor + }, + Some(MigrationCursor::Stuck) => { + log::error!("Migration stuck. Governance intervention required."); + return meter.consumed() + }, + }; + debug_assert!(Self::ongoing()); + + // The limit here is a defensive measure to prevent an infinite loop. It expresses that we + // allow no more than 8 MBMs to finish in a single block. This should be harmless, since we + // generally expect *Multi*-Block-Migrations to take *multiple* blocks. + for i in 0..8 { + match Self::exec_migration(cursor, i == 0, &mut meter) { + None => return meter.consumed(), + Some(ControlFlow::Continue(next_cursor)) => { + cursor = next_cursor; + }, + Some(ControlFlow::Break(last_cursor)) => { + cursor = last_cursor; + break + }, + } + } + + Cursor::::set(Some(cursor.into())); + + meter.consumed() + } + + /// Try to make progress on the current migration. + /// + /// Returns whether processing should continue or break for this block. The return value means: + /// - `None`: The migration process is completely finished. + /// - `ControlFlow::Break`: Continue in the *next* block with the given cursor. + /// - `ControlFlow::Continue`: Continue in the *current* block with the given cursor. + fn exec_migration( + mut cursor: ActiveCursorOf, + is_first: bool, + meter: &mut WeightMeter, + ) -> Option, ActiveCursorOf>> { + // The differences between the single branches' weights is not that big. And since we do + // only one step per block, we can just use the maximum instead of more precise accounting. + if meter.try_consume(Self::exec_migration_max_weight()).is_err() { + defensive_assert!(!is_first, "There should be enough weight to do this at least once"); + return Some(ControlFlow::Break(cursor)) + } + + let Some(id) = T::Migrations::nth_id(cursor.index) else { + // No more migrations in the tuple - we are done. 
+ defensive_assert!(cursor.index == T::Migrations::len(), "Inconsistent MBMs tuple"); + Self::deposit_event(Event::UpgradeCompleted); + Cursor::::kill(); + T::MigrationStatusHandler::completed(); + return None; + }; + + let Ok(bounded_id): Result, _> = id.try_into() else { + defensive!("integrity_test ensures that all identifiers' MEL bounds fit into CursorMaxLen; qed."); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + if Historic::::contains_key(&bounded_id) { + Self::deposit_event(Event::MigrationSkipped { index: cursor.index }); + cursor.goto_next_migration(System::::block_number()); + return Some(ControlFlow::Continue(cursor)) + } + + let max_steps = T::Migrations::nth_max_steps(cursor.index); + let next_cursor = T::Migrations::nth_transactional_step( + cursor.index, + cursor.inner_cursor.clone().map(|c| c.into_inner()), + meter, + ); + let Some((max_steps, next_cursor)) = max_steps.zip(next_cursor) else { + defensive!("integrity_test ensures that the tuple is valid; qed"); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + let took = System::::block_number().saturating_sub(cursor.started_at); + match next_cursor { + Ok(Some(next_cursor)) => { + let Ok(bound_next_cursor) = next_cursor.try_into() else { + defensive!("The integrity check ensures that all cursors' MEL bound fits into CursorMaxLen; qed"); + Self::upgrade_failed(Some(cursor.index)); + return None + }; + + Self::deposit_event(Event::MigrationAdvanced { index: cursor.index, took }); + cursor.inner_cursor = Some(bound_next_cursor); + + if max_steps.map_or(false, |max| took > max.into()) { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + } else { + // A migration cannot progress more than one step per block, we therefore break. + Some(ControlFlow::Break(cursor)) + } + }, + Ok(None) => { + // A migration is done when it returns cursor `None`. + Self::deposit_event(Event::MigrationCompleted { index: cursor.index, took }); + Historic::::insert(&bounded_id, ()); + cursor.goto_next_migration(System::::block_number()); + Some(ControlFlow::Continue(cursor)) + }, + Err(SteppedMigrationError::InsufficientWeight { required }) => { + if is_first || required.any_gt(meter.limit()) { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + } else { + // Retry and hope that there is more weight in the next block. + Some(ControlFlow::Break(cursor)) + } + }, + Err(SteppedMigrationError::InvalidCursor | SteppedMigrationError::Failed) => { + Self::deposit_event(Event::MigrationFailed { index: cursor.index, took }); + Self::upgrade_failed(Some(cursor.index)); + None + }, + } + } + + /// Fail the current runtime upgrade, caused by `migration`. 
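+	///
+	/// Emits `UpgradeFailed` and defers to the configured `T::FailedMigrationHandler`: `KeepStuck`
+	/// parks the cursor at [`MigrationCursor::Stuck`], `ForceUnstuck` removes it and `Ignore`
+	/// leaves it untouched.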
+ fn upgrade_failed(migration: Option) { + use FailedMigrationHandling::*; + Self::deposit_event(Event::UpgradeFailed); + + match T::FailedMigrationHandler::failed(migration) { + KeepStuck => Cursor::::set(Some(MigrationCursor::Stuck)), + ForceUnstuck => Cursor::::kill(), + Ignore => {}, + } + } + + fn exec_migration_max_weight() -> Weight { + T::WeightInfo::exec_migration_complete() + .max(T::WeightInfo::exec_migration_completed()) + .max(T::WeightInfo::exec_migration_skipped_historic()) + .max(T::WeightInfo::exec_migration_advance()) + .max(T::WeightInfo::exec_migration_fail()) + } +} + +impl MultiStepMigrator for Pallet { + fn ongoing() -> bool { + Cursor::::exists() + } + + fn step() -> Weight { + Self::progress_mbms(System::::block_number()) + } +} diff --git a/substrate/frame/migrations/src/mock.rs b/substrate/frame/migrations/src/mock.rs new file mode 100644 index 00000000000..37292647554 --- /dev/null +++ b/substrate/frame/migrations/src/mock.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mocked runtime for testing the migrations pallet. + +#![cfg(test)] + +use crate::{mock_helpers::*, Event, Historic}; + +use frame_support::{ + derive_impl, + migrations::*, + traits::{OnFinalize, OnInitialize}, + weights::Weight, +}; +use frame_system::EventRecord; +use sp_core::{ConstU32, H256}; + +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Migrations: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type PalletInfo = PalletInfo; + type MultiBlockMigrator = Migrations; +} + +frame_support::parameter_types! { + pub const MaxServiceWeight: Weight = Weight::MAX.div(10); +} + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Migrations = MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = MockedMigrationStatusHandler; + type FailedMigrationHandler = MockedFailedMigrationHandler; + type MaxServiceWeight = MaxServiceWeight; + type WeightInfo = (); +} + +frame_support::parameter_types! { + /// The number of started upgrades. + pub static UpgradesStarted: u32 = 0; + /// The number of completed upgrades. + pub static UpgradesCompleted: u32 = 0; + /// The migrations that failed. + pub static UpgradesFailed: Vec> = vec![]; + /// Return value of [`MockedFailedMigrationHandler::failed`]. + pub static FailedUpgradeResponse: FailedMigrationHandling = FailedMigrationHandling::KeepStuck; +} + +/// Records all started and completed upgrades in `UpgradesStarted` and `UpgradesCompleted`. 
+pub struct MockedMigrationStatusHandler; +impl MigrationStatusHandler for MockedMigrationStatusHandler { + fn started() { + log::info!("MigrationStatusHandler started"); + UpgradesStarted::mutate(|v| *v += 1); + } + + fn completed() { + log::info!("MigrationStatusHandler completed"); + UpgradesCompleted::mutate(|v| *v += 1); + } +} + +/// Records all failed upgrades in `UpgradesFailed`. +pub struct MockedFailedMigrationHandler; +impl FailedMigrationHandler for MockedFailedMigrationHandler { + fn failed(migration: Option) -> FailedMigrationHandling { + UpgradesFailed::mutate(|v| v.push(migration)); + let res = FailedUpgradeResponse::get(); + log::error!("FailedMigrationHandler failed at: {migration:?}, handling as {res:?}"); + res + } +} + +/// Returns the number of `(started, completed, failed)` upgrades and resets their numbers. +pub fn upgrades_started_completed_failed() -> (u32, u32, u32) { + (UpgradesStarted::take(), UpgradesCompleted::take(), UpgradesFailed::take().len() as u32) +} + +/// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + sp_io::TestExternalities::new(Default::default()) +} + +/// Run this closure in test externalities. +pub fn test_closure(f: impl FnOnce() -> R) -> R { + let mut ext = new_test_ext(); + ext.execute_with(f) +} + +pub fn run_to_block(n: u32) { + while System::block_number() < n as u64 { + log::debug!("Block {}", System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Migrations::on_initialize(System::block_number()); + // Executive calls this: + ::step(); + + Migrations::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + } +} + +/// Returns the historic migrations, sorted by their identifier. +pub fn historic() -> Vec { + let mut historic = Historic::::iter_keys().collect::>(); + historic.sort(); + historic +} + +// Traits to make using events less insufferable: +pub trait IntoRecord { + fn into_record(self) -> EventRecord<::RuntimeEvent, H256>; +} + +impl IntoRecord for Event { + fn into_record(self) -> EventRecord<::RuntimeEvent, H256> { + let re: ::RuntimeEvent = self.into(); + EventRecord { phase: frame_system::Phase::Initialization, event: re, topics: vec![] } + } +} + +pub trait IntoRecords { + fn into_records(self) -> Vec::RuntimeEvent, H256>>; +} + +impl IntoRecords for Vec { + fn into_records(self) -> Vec::RuntimeEvent, H256>> { + self.into_iter().map(|e| e.into_record()).collect() + } +} + +pub fn assert_events(events: Vec) { + pretty_assertions::assert_eq!(events.into_records(), System::events()); + System::reset_events(); +} diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs new file mode 100644 index 00000000000..c5e23efb4e3 --- /dev/null +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test helpers for internal and external usage. + +#![allow(missing_docs)] + +use codec::{Decode, Encode}; +use frame_support::{ + migrations::*, + weights::{Weight, WeightMeter}, +}; +use sp_core::ConstU32; +use sp_runtime::BoundedVec; +use sp_std::{vec, vec::Vec}; + +/// Opaque identifier of a migration. +pub type MockedIdentifier = BoundedVec>; + +/// How a mocked migration should behave. +#[derive(Debug, Clone, Copy, Encode, Decode)] +pub enum MockedMigrationKind { + /// Succeed after its number of steps elapsed. + SucceedAfter, + /// Fail after its number of steps elapsed. + FailAfter, + /// Never terminate. + TimeoutAfter, + /// Cause an [`SteppedMigrationError::InsufficientWeight`] error after its number of steps + /// elapsed. + HighWeightAfter(Weight), +} +use MockedMigrationKind::*; // C style + +/// Creates a migration identifier with a specific `kind` and `steps`. +pub fn mocked_id(kind: MockedMigrationKind, steps: u32) -> MockedIdentifier { + (b"MockedMigration", kind, steps).encode().try_into().unwrap() +} + +frame_support::parameter_types! { + /// The configs for the migrations to run. + storage MIGRATIONS: Vec<(MockedMigrationKind, u32)> = vec![]; +} + +/// Allows to set the migrations to run at runtime instead of compile-time. +/// +/// It achieves this by using the storage to store the migrations to run. +pub struct MockedMigrations; +impl SteppedMigrations for MockedMigrations { + fn len() -> u32 { + MIGRATIONS::get().len() as u32 + } + + fn nth_id(n: u32) -> Option> { + let k = MIGRATIONS::get().get(n as usize).copied(); + k.map(|(kind, steps)| mocked_id(kind, steps).into_inner()) + } + + fn nth_step( + n: u32, + cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let (kind, steps) = MIGRATIONS::get()[n as usize]; + + let mut count: u32 = + cursor.as_ref().and_then(|c| Decode::decode(&mut &c[..]).ok()).unwrap_or(0); + log::debug!("MockedMigration: Step {}", count); + if count != steps || matches!(kind, TimeoutAfter) { + count += 1; + return Some(Ok(Some(count.encode()))) + } + + Some(match kind { + SucceedAfter => { + log::debug!("MockedMigration: Succeeded after {} steps", count); + Ok(None) + }, + HighWeightAfter(required) => { + log::debug!("MockedMigration: Not enough weight after {} steps", count); + Err(SteppedMigrationError::InsufficientWeight { required }) + }, + FailAfter => { + log::debug!("MockedMigration: Failed after {} steps", count); + Err(SteppedMigrationError::Failed) + }, + TimeoutAfter => unreachable!(), + }) + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + // This is a hack but should be fine. We dont need it in testing. + Self::nth_step(n, cursor, meter) + } + + fn nth_max_steps(n: u32) -> Option> { + MIGRATIONS::get().get(n as usize).map(|(_, s)| Some(*s)) + } + + fn cursor_max_encoded_len() -> usize { + 65_536 + } + + fn identifier_max_encoded_len() -> usize { + 256 + } +} + +impl MockedMigrations { + /// Set the migrations to run. 
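+	///
+	/// A usage sketch, mirroring how the tests drive the mock:
+	///
+	/// ```ignore
+	/// // One migration succeeding after two steps, one failing after one step.
+	/// MockedMigrations::set(vec![(SucceedAfter, 2), (FailAfter, 1)]);
+	/// ```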
+ pub fn set(migrations: Vec<(MockedMigrationKind, u32)>) { + MIGRATIONS::set(&migrations); + } +} + +impl crate::MockedMigrations for MockedMigrations { + fn set_fail_after(steps: u32) { + MIGRATIONS::set(&vec![(FailAfter, steps)]); + } + + fn set_success_after(steps: u32) { + MIGRATIONS::set(&vec![(SucceedAfter, steps)]); + } +} diff --git a/substrate/frame/migrations/src/tests.rs b/substrate/frame/migrations/src/tests.rs new file mode 100644 index 00000000000..9c9043d37a6 --- /dev/null +++ b/substrate/frame/migrations/src/tests.rs @@ -0,0 +1,335 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +use crate::{ + mock::{Test as T, *}, + mock_helpers::{MockedMigrationKind::*, *}, + Cursor, Event, FailedMigrationHandling, MigrationCursor, +}; +use frame_support::{pallet_prelude::Weight, traits::OnRuntimeUpgrade}; + +#[docify::export] +#[test] +fn simple_works() { + use Event::*; + test_closure(|| { + // Add three migrations, each taking one block longer than the previous. + MockedMigrations::set(vec![(SucceedAfter, 0), (SucceedAfter, 1), (SucceedAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Check that the executed migrations are recorded in `Historical`. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + + // Check that we got all events. + assert_events(vec![ + UpgradeStarted { migrations: 3 }, + MigrationCompleted { index: 0, took: 1 }, + MigrationAdvanced { index: 1, took: 0 }, + MigrationCompleted { index: 1, took: 1 }, + MigrationAdvanced { index: 2, took: 0 }, + MigrationAdvanced { index: 2, took: 1 }, + MigrationCompleted { index: 2, took: 2 }, + UpgradeCompleted, + ]); + }); +} + +#[test] +fn failing_migration_sets_cursor_to_stuck() { + test_closure(|| { + FailedUpgradeResponse::set(FailedMigrationHandling::KeepStuck); + MockedMigrations::set(vec![(FailAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. 
+ assert_eq!(UpgradesStarted::take(), 1); + assert_eq!(UpgradesCompleted::take(), 0); + assert_eq!(UpgradesFailed::take(), vec![Some(0)]); + + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck), "Must stuck the chain"); + }); +} + +#[test] +fn failing_migration_force_unstuck_works() { + test_closure(|| { + FailedUpgradeResponse::set(FailedMigrationHandling::ForceUnstuck); + MockedMigrations::set(vec![(FailAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(UpgradesStarted::take(), 1); + assert_eq!(UpgradesCompleted::take(), 0); + assert_eq!(UpgradesFailed::take(), vec![Some(0)]); + + assert!(Cursor::::get().is_none(), "Must unstuck the chain"); + }); +} + +/// A migration that reports not getting enough weight errors if it is the first one to run in that +/// block. +#[test] +fn high_weight_migration_singular_fails() { + test_closure(|| { + MockedMigrations::set(vec![(HighWeightAfter(Weight::zero()), 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Failed migrations are not recorded in `Historical`. + assert!(historic().is_empty()); + // Check that we got all events. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationFailed { index: 0, took: 3 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +/// A migration that reports of not getting enough weight is retried once, if it is not the first +/// one to run in a block. +#[test] +fn high_weight_migration_retries_once() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::zero()), 0)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]); + // Check that we got all events. + assert_events::>(vec![ + Event::UpgradeStarted { migrations: 2 }, + Event::MigrationCompleted { index: 0, took: 1 }, + // `took=1` means that it was retried once. + Event::MigrationFailed { index: 1, took: 1 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +/// If a migration uses more weight than the limit, then it will not retry but fail even when it is +/// not the first one in the block. +// Note: Same as `high_weight_migration_retries_once` but with different required weight for the +// migration. 
+#[test] +fn high_weight_migration_permanently_overweight_fails() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::MAX), 0)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + assert_eq!(historic(), vec![mocked_id(SucceedAfter, 0)]); + // Check that we got all events. + assert_events::>(vec![ + Event::UpgradeStarted { migrations: 2 }, + Event::MigrationCompleted { index: 0, took: 1 }, + // `blocks=0` means that it was not retried. + Event::MigrationFailed { index: 1, took: 0 }, + Event::UpgradeFailed, + ]); + + // Check that the handler was called correctly. + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + }); +} + +#[test] +fn historic_skipping_works() { + test_closure(|| { + MockedMigrations::set(vec![ + (SucceedAfter, 0), + (SucceedAfter, 0), // duplicate + (SucceedAfter, 1), + (SucceedAfter, 2), + (SucceedAfter, 1), // duplicate + ]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Just three historical ones, since two were added twice. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + // Events received. + assert_events(vec![ + Event::UpgradeStarted { migrations: 5 }, + Event::MigrationCompleted { index: 0, took: 1 }, + Event::MigrationSkipped { index: 1 }, + Event::MigrationAdvanced { index: 2, took: 0 }, + Event::MigrationCompleted { index: 2, took: 1 }, + Event::MigrationAdvanced { index: 3, took: 0 }, + Event::MigrationAdvanced { index: 3, took: 1 }, + Event::MigrationCompleted { index: 3, took: 2 }, + Event::MigrationSkipped { index: 4 }, + Event::UpgradeCompleted, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 1, 0)); + + // Now go for another upgrade; just to make sure that it wont execute again. + System::reset_events(); + Migrations::on_runtime_upgrade(); + run_to_block(20); + + // Same historical ones as before. + assert_eq!( + historic(), + vec![ + mocked_id(SucceedAfter, 0), + mocked_id(SucceedAfter, 1), + mocked_id(SucceedAfter, 2), + ] + ); + + // Everything got skipped. + assert_events(vec![ + Event::UpgradeStarted { migrations: 5 }, + Event::MigrationSkipped { index: 0 }, + Event::MigrationSkipped { index: 1 }, + Event::MigrationSkipped { index: 2 }, + Event::MigrationSkipped { index: 3 }, + Event::MigrationSkipped { index: 4 }, + Event::UpgradeCompleted, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 1, 0)); + }); +} + +/// When another upgrade happens while a migration is still running, it should set the cursor to +/// stuck. +#[test] +fn upgrade_fails_when_migration_active() { + test_closure(|| { + MockedMigrations::set(vec![(SucceedAfter, 10)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(3); + + // Events received. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 0, 0)); + + // Upgrade again. 
+ Migrations::on_runtime_upgrade(); + // -- Defensive path -- + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + assert_events(vec![Event::UpgradeFailed]); + assert_eq!(upgrades_started_completed_failed(), (0, 0, 1)); + }); +} + +#[test] +fn migration_timeout_errors() { + test_closure(|| { + MockedMigrations::set(vec![(TimeoutAfter, 3)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(5); + + // Times out after taking more than 3 steps. + assert_events(vec![ + Event::UpgradeStarted { migrations: 1 }, + Event::MigrationAdvanced { index: 0, took: 1 }, + Event::MigrationAdvanced { index: 0, took: 2 }, + Event::MigrationAdvanced { index: 0, took: 3 }, + Event::MigrationAdvanced { index: 0, took: 4 }, + Event::MigrationFailed { index: 0, took: 4 }, + Event::UpgradeFailed, + ]); + assert_eq!(upgrades_started_completed_failed(), (1, 0, 1)); + + // Failed migrations are not black-listed. + assert!(historic().is_empty()); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + + Migrations::on_runtime_upgrade(); + run_to_block(6); + + assert_events(vec![Event::UpgradeFailed]); + assert_eq!(Cursor::::get(), Some(MigrationCursor::Stuck)); + assert_eq!(upgrades_started_completed_failed(), (0, 0, 1)); + }); +} diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs new file mode 100644 index 00000000000..c9b63258c44 --- /dev/null +++ b/substrate/frame/migrations/src/weights.rs @@ -0,0 +1,358 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_migrations` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `loud1`, CPU: `AMD EPYC 7282 16-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet-migrations +// --extrinsic +// +// --output +// weight.rs +// --template +// ../../polkadot-sdk/substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_migrations`. 
+pub trait WeightInfo { + fn onboard_new_mbms() -> Weight; + fn progress_mbms_none() -> Weight; + fn exec_migration_completed() -> Weight; + fn exec_migration_skipped_historic() -> Weight; + fn exec_migration_advance() -> Weight; + fn exec_migration_complete() -> Weight; + fn exec_migration_fail() -> Weight; + fn on_init_loop() -> Weight; + fn force_set_cursor() -> Weight; + fn force_set_active_cursor() -> Weight; + fn force_onboard_mbms() -> Weight; + fn clear_historic(n: u32, ) -> Weight; +} + +/// Weights for `pallet_migrations` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `67035` + // Minimum execution time: 13_980_000 picoseconds. + Weight::from_parts(14_290_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `67035` + // Minimum execution time: 3_770_000 picoseconds. + Weight::from_parts(4_001_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 10_900_000 picoseconds. + Weight::from_parts(11_251_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 17_891_000 picoseconds. + Weight::from_parts(18_501_000, 3762) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 18_271_000 picoseconds. 
+ Weight::from_parts(18_740_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 21_241_000 picoseconds. + Weight::from_parts(21_911_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 22_740_000 picoseconds. + Weight::from_parts(23_231_000, 3731) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(500_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_751_000 picoseconds. + Weight::from_parts(5_950_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_350_000 picoseconds. + Weight::from_parts(6_560_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `67035` + // Minimum execution time: 11_121_000 picoseconds. + Weight::from_parts(11_530_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. 
+ fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1089 + n * (271 Β±0)` + // Estimated: `3834 + n * (2740 Β±0)` + // Minimum execution time: 21_891_000 picoseconds. + Weight::from_parts(18_572_306, 3834) + // Standard Error: 3_236 + .saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `67035` + // Minimum execution time: 13_980_000 picoseconds. + Weight::from_parts(14_290_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `67035` + // Minimum execution time: 3_770_000 picoseconds. + Weight::from_parts(4_001_000, 67035) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 10_900_000 picoseconds. + Weight::from_parts(11_251_000, 3599) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 17_891_000 picoseconds. 
+ Weight::from_parts(18_501_000, 3762) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 18_271_000 picoseconds. + Weight::from_parts(18_740_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 21_241_000 picoseconds. + Weight::from_parts(21_911_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3731` + // Minimum execution time: 22_740_000 picoseconds. + Weight::from_parts(23_231_000, 3731) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 440_000 picoseconds. + Weight::from_parts(500_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_751_000 picoseconds. + Weight::from_parts(5_950_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_350_000 picoseconds. 
+ Weight::from_parts(6_560_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `67035` + // Minimum execution time: 11_121_000 picoseconds. + Weight::from_parts(11_530_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1089 + n * (271 Β±0)` + // Estimated: `3834 + n * (2740 Β±0)` + // Minimum execution time: 21_891_000 picoseconds. + Weight::from_parts(18_572_306, 3834) + // Standard Error: 3_236 + .saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index a2e5d726fdd..52db7c34bfd 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -197,7 +197,7 @@ pub mod runtime { // Types often used in the runtime APIs. pub use sp_core::OpaqueMetadata; pub use sp_inherents::{CheckInherentsResult, InherentData}; - pub use sp_runtime::ApplyExtrinsicResult; + pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; pub use frame_system_rpc_runtime_api::*; pub use sp_api::{self, *}; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 34b9d21d8ce..da483fa6cf0 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -204,47 +204,50 @@ pub fn expand_outer_inherent( } } + impl #scrate::traits::IsInherent<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> for #runtime { + fn is_inherent(ext: &<#block as #scrate::sp_runtime::traits::Block>::Extrinsic) -> bool { + use #scrate::inherent::ProvideInherent; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + + if #scrate::sp_runtime::traits::Extrinsic::is_signed(ext).unwrap_or(false) { + // Signed extrinsics are never inherents. 
+ return false + } + + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(ext); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if <#pallet_names as ProvideInherent>::is_inherent(&call) { + return true; + } + } + } + )* + false + } + } + impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { - fn ensure_inherents_are_first(block: &#block) -> Result<(), u32> { + fn ensure_inherents_are_first(block: &#block) -> Result { use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; use #scrate::sp_runtime::traits::Block as _; - let mut first_signed_observed = false; + let mut num_inherents = 0u32; for (i, xt) in block.extrinsics().iter().enumerate() { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); - - let is_inherent = if is_signed { - // Signed extrinsics are not inherents. - false - } else { - let mut is_inherent = false; - #( - #pallet_attrs - { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; - } - } - } - )* - is_inherent - }; - - if !is_inherent { - first_signed_observed = true; - } + if >::is_inherent(xt) { + if num_inherents != i as u32 { + return Err(i as u32); + } - if first_signed_observed && is_inherent { - return Err(i as u32) + num_inherents += 1; // Safe since we are in an `enumerate` loop. } } - Ok(()) + Ok(num_inherents) } } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 6b25ddcba1a..3623b595268 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -175,6 +175,22 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> + #frame_support::traits::OnPoll<#frame_system::pallet_prelude::BlockNumberFor::> + for #pallet_ident<#type_use_gen> #where_clause + { + fn on_poll( + n: #frame_system::pallet_prelude::BlockNumberFor::, + weight: &mut #frame_support::weights::WeightMeter + ) { + < + Self as #frame_support::traits::Hooks< + #frame_system::pallet_prelude::BlockNumberFor:: + > + >::on_poll(n, weight); + } + } + impl<#type_impl_gen> #frame_support::traits::OnInitialize<#frame_system::pallet_prelude::BlockNumberFor::> for #pallet_ident<#type_use_gen> #where_clause diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index f7541059023..2ceab44cb16 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -16,13 +16,21 @@ // limitations under the License. use crate::{ - traits::{GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, StorageVersion}, - weights::{RuntimeDbWeight, Weight}, + defensive, + storage::transactional::with_transaction_opaque_err, + traits::{ + Defensive, GetStorageVersion, NoStorageVersionSet, PalletInfoAccess, SafeMode, + StorageVersion, + }, + weights::{RuntimeDbWeight, Weight, WeightMeter}, }; +use codec::{Decode, Encode, MaxEncodedLen}; use impl_trait_for_tuples::impl_for_tuples; +use sp_arithmetic::traits::Bounded; use sp_core::Get; use sp_io::{hashing::twox_128, storage::clear_prefix, KillStorageResult}; -use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use sp_std::{marker::PhantomData, vec::Vec}; /// Handles storage migration pallet versioning. 
/// @@ -91,7 +99,7 @@ pub struct VersionedMigration), @@ -118,7 +126,6 @@ impl< /// migration ran or not. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - use codec::Encode; let on_chain_version = Pallet::on_chain_storage_version(); if on_chain_version == FROM { Ok(VersionedPostUpgradeData::MigrationExecuted(Inner::pre_upgrade()?).encode()) @@ -361,3 +368,622 @@ impl, DbWeight: Get> frame_support::traits Ok(()) } } + +/// A migration that can proceed in multiple steps. +pub trait SteppedMigration { + /// The cursor type that stores the progress (aka. state) of this migration. + type Cursor: codec::FullCodec + codec::MaxEncodedLen; + + /// The unique identifier type of this migration. + type Identifier: codec::FullCodec + codec::MaxEncodedLen; + + /// The unique identifier of this migration. + /// + /// If two migrations have the same identifier, then they are assumed to be identical. + fn id() -> Self::Identifier; + + /// The maximum number of steps that this migration can take. + /// + /// This can be used to enforce progress and prevent migrations becoming stuck forever. A + /// migration that exceeds its max steps is treated as failed. `None` means that there is no + /// limit. + fn max_steps() -> Option { + None + } + + /// Try to migrate as much as possible with the given weight. + /// + /// **ANY STORAGE CHANGES MUST BE ROLLED-BACK BY THE CALLER UPON ERROR.** This is necessary + /// since the caller cannot return a cursor in the error case. [`Self::transactional_step`] is + /// provided as convenience for a caller. A cursor of `None` implies that the migration is at + /// its end. A migration that once returned `Nonce` is guaranteed to never be called again. + fn step( + cursor: Option, + meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError>; + + /// Same as [`Self::step`], but rolls back pending changes in the error case. + fn transactional_step( + mut cursor: Option, + meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + with_transaction_opaque_err(move || match Self::step(cursor, meter) { + Ok(new_cursor) => { + cursor = new_cursor; + sp_runtime::TransactionOutcome::Commit(Ok(cursor)) + }, + Err(err) => sp_runtime::TransactionOutcome::Rollback(Err(err)), + }) + .map_err(|()| SteppedMigrationError::Failed)? + } +} + +/// Error that can occur during a [`SteppedMigration`]. +#[derive(Debug, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)] +pub enum SteppedMigrationError { + // Transient errors: + /// The remaining weight is not enough to do anything. + /// + /// Can be resolved by calling with at least `required` weight. Note that calling it with + /// exactly `required` weight could cause it to not make any progress. + InsufficientWeight { + /// Amount of weight required to make progress. + required: Weight, + }, + // Permanent errors: + /// The migration cannot decode its cursor and therefore not proceed. + /// + /// This should not happen unless (1) the migration itself returned an invalid cursor in a + /// previous iteration, (2) the storage got corrupted or (3) there is a bug in the caller's + /// code. + InvalidCursor, + /// The migration encountered a permanent error and cannot continue. + Failed, +} + +/// Notification handler for status updates regarding Multi-Block-Migrations. +#[impl_trait_for_tuples::impl_for_tuples(8)] +pub trait MigrationStatusHandler { + /// Notifies of the start of a runtime migration. + fn started() {} + + /// Notifies of the completion of a runtime migration. 
+ fn completed() {}
+}
+
+/// Handles a failed runtime migration.
+///
+/// This should never happen, but is here for completeness.
+pub trait FailedMigrationHandler {
+ /// Infallibly handle a failed runtime migration.
+ ///
+ /// Gets passed in the optional index of the migration in the batch that caused the failure.
+ /// Returning `None` means that no automatic handling should take place and the callee decides
+ /// in the implementation what to do.
+ fn failed(migration: Option<u32>) -> FailedMigrationHandling;
+}
+
+/// Do not allow any transactions to be processed after a runtime upgrade failed.
+///
+/// This is **not a sane default**, since it prevents governance intervention.
+pub struct FreezeChainOnFailedMigration;
+
+impl FailedMigrationHandler for FreezeChainOnFailedMigration {
+ fn failed(_migration: Option<u32>) -> FailedMigrationHandling {
+ FailedMigrationHandling::KeepStuck
+ }
+}
+
+/// Enter safe mode on a failed runtime upgrade.
+///
+/// This can be very useful to manually intervene and fix the chain state. `Else` is used in case
+/// that the safe mode could not be entered.
+pub struct EnterSafeModeOnFailedMigration<SM, Else>(
+ PhantomData<(SM, Else)>,
+);
+
+impl<Else: FailedMigrationHandler, SM: SafeMode> FailedMigrationHandler
+ for EnterSafeModeOnFailedMigration<SM, Else>
+where
+ <SM as SafeMode>::BlockNumber: Bounded,
+{
+ fn failed(migration: Option<u32>) -> FailedMigrationHandling {
+ let entered = if SM::is_entered() {
+ SM::extend(Bounded::max_value())
+ } else {
+ SM::enter(Bounded::max_value())
+ };
+
+ // If we could not enter or extend safe mode (for whatever reason), then we try the next.
+ if entered.is_err() {
+ Else::failed(migration)
+ } else {
+ FailedMigrationHandling::KeepStuck
+ }
+ }
+}
+
+/// How to proceed after a runtime upgrade failed.
+///
+/// There is NO SANE DEFAULT HERE. All options are very dangerous and should be used with care.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum FailedMigrationHandling {
+ /// Resume extrinsic processing of the chain. This will not resume the upgrade.
+ ///
+ /// This should be supplemented with additional measures to ensure that the broken chain state
+ /// does not get further messed up by user extrinsics.
+ ForceUnstuck,
+ /// Set the cursor to `Stuck` and keep blocking extrinsics.
+ KeepStuck,
+ /// Don't do anything with the cursor and let the handler decide.
+ ///
+ /// This can be useful in cases where the other two options would overwrite any changes that
+ /// were done by the handler to the cursor.
+ Ignore,
+}
+
+/// Something that can do multi step migrations.
+pub trait MultiStepMigrator {
+ /// Hint for whether [`Self::step`] should be called.
+ fn ongoing() -> bool;
+
+ /// Do the next step in the MBM process.
+ ///
+ /// Must gracefully handle the case that it is currently not upgrading.
+ fn step() -> Weight;
+}
+
+impl MultiStepMigrator for () {
+ fn ongoing() -> bool {
+ false
+ }
+
+ fn step() -> Weight {
+ Weight::zero()
+ }
+}
+
+/// Multiple [`SteppedMigration`].
+pub trait SteppedMigrations {
+ /// The number of migrations that `Self` aggregates.
+ fn len() -> u32;
+
+ /// The `n`th [`SteppedMigration::id`].
+ ///
+ /// Is guaranteed to return `Some` if `n < Self::len()`.
+ fn nth_id(n: u32) -> Option<Vec<u8>>;
+
+ /// The [`SteppedMigration::max_steps`] of the `n`th migration.
+ ///
+ /// Is guaranteed to return `Some` if `n < Self::len()`.
+ fn nth_max_steps(n: u32) -> Option<Option<u32>>;
+
+ /// Do a [`SteppedMigration::step`] on the `n`th migration.
+ ///
+ /// Is guaranteed to return `Some` if `n < Self::len()`.
+ fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// Do a [`SteppedMigration::transactional_step`] on the `n`th migration. + /// + /// Is guaranteed to return `Some` if `n < Self::len()`. + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>>; + + /// The maximal encoded length across all cursors. + fn cursor_max_encoded_len() -> usize; + + /// The maximal encoded length across all identifiers. + fn identifier_max_encoded_len() -> usize; + + /// Assert the integrity of the migrations. + /// + /// Should be executed as part of a test prior to runtime usage. May or may not need + /// externalities. + #[cfg(feature = "std")] + fn integrity_test() -> Result<(), &'static str> { + use crate::ensure; + let l = Self::len(); + + for n in 0..l { + ensure!(Self::nth_id(n).is_some(), "id is None"); + ensure!(Self::nth_max_steps(n).is_some(), "steps is None"); + + // The cursor that we use does not matter. Hence use empty. + ensure!( + Self::nth_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + ensure!( + Self::nth_transactional_step(n, Some(vec![]), &mut WeightMeter::new()).is_some(), + "steps is None" + ); + } + + Ok(()) + } +} + +impl SteppedMigrations for () { + fn len() -> u32 { + 0 + } + + fn nth_id(_n: u32) -> Option> { + None + } + + fn nth_max_steps(_n: u32) -> Option> { + None + } + + fn nth_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn nth_transactional_step( + _n: u32, + _cursor: Option>, + _meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + None + } + + fn cursor_max_encoded_len() -> usize { + 0 + } + + fn identifier_max_encoded_len() -> usize { + 0 + } +} + +// A collection consisting of only a single migration. +impl SteppedMigrations for T { + fn len() -> u32 { + 1 + } + + fn nth_id(_n: u32) -> Option> { + Some(T::id().encode()) + } + + fn nth_max_steps(n: u32) -> Option> { + // It should be generally fine to call with n>0, but the code should not attempt to. 
+ n.is_zero() + .then_some(T::max_steps()) + .defensive_proof("nth_max_steps should only be called with n==0") + } + + fn nth_step( + _n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if !_n.is_zero() { + defensive!("nth_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some(T::step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode()))) + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + if n != 0 { + defensive!("nth_transactional_step should only be called with n==0"); + return None + } + + let cursor = match cursor { + Some(cursor) => match T::Cursor::decode(&mut &cursor[..]) { + Ok(cursor) => Some(cursor), + Err(_) => return Some(Err(SteppedMigrationError::InvalidCursor)), + }, + None => None, + }; + + Some( + T::transactional_step(cursor, meter).map(|cursor| cursor.map(|cursor| cursor.encode())), + ) + } + + fn cursor_max_encoded_len() -> usize { + T::Cursor::max_encoded_len() + } + + fn identifier_max_encoded_len() -> usize { + T::Identifier::max_encoded_len() + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +impl SteppedMigrations for Tuple { + fn len() -> u32 { + for_tuples!( #( Tuple::len() )+* ) + } + + fn nth_id(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_id(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_transactional_step( + n: u32, + cursor: Option>, + meter: &mut WeightMeter, + ) -> Option>, SteppedMigrationError>> { + let mut i = 0; + + for_tuples! 
( #( + if (i + Tuple::len()) > n { + return Tuple::nth_transactional_step(n - i, cursor, meter) + } + + i += Tuple::len(); + )* ); + + None + } + + fn nth_max_steps(n: u32) -> Option> { + let mut i = 0; + + for_tuples!( #( + if (i + Tuple::len()) > n { + return Tuple::nth_max_steps(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + fn cursor_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::cursor_max_encoded_len()); + )* ); + + max_len + } + + fn identifier_max_encoded_len() -> usize { + let mut max_len = 0; + + for_tuples!( #( + max_len = max_len.max(Tuple::identifier_max_encoded_len()); + )* ); + + max_len + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{assert_ok, storage::unhashed}; + + #[derive(Decode, Encode, MaxEncodedLen, Eq, PartialEq)] + pub enum Either { + Left(L), + Right(R), + } + + pub struct M0; + impl SteppedMigration for M0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 0 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M0"); + unhashed::put(&[0], &()); + Ok(None) + } + } + + pub struct M1; + impl SteppedMigration for M1 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 1 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M1"); + unhashed::put(&[1], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(1) + } + } + + pub struct M2; + impl SteppedMigration for M2 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 2 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("M2"); + unhashed::put(&[2], &()); + Ok(None) + } + + fn max_steps() -> Option { + Some(2) + } + } + + pub struct F0; + impl SteppedMigration for F0 { + type Cursor = (); + type Identifier = u8; + + fn id() -> Self::Identifier { + 3 + } + + fn step( + _cursor: Option, + _meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + log::info!("F0"); + unhashed::put(&[3], &()); + Err(SteppedMigrationError::Failed) + } + } + + // Three migrations combined to execute in order: + type Triple = (M0, (M1, M2)); + // Six migrations, just concatenating the ones from before: + type Hextuple = (Triple, Triple); + + #[test] + fn singular_migrations_work() { + assert_eq!(M0::max_steps(), None); + assert_eq!(M1::max_steps(), Some(1)); + assert_eq!(M2::max_steps(), Some(2)); + + assert_eq!(<(M0, M1)>::nth_max_steps(0), Some(None)); + assert_eq!(<(M0, M1)>::nth_max_steps(1), Some(Some(1))); + assert_eq!(<(M0, M1, M2)>::nth_max_steps(2), Some(Some(2))); + + assert_eq!(<(M0, M1)>::nth_max_steps(2), None); + } + + #[test] + fn tuple_migrations_work() { + assert_eq!(<() as SteppedMigrations>::len(), 0); + assert_eq!(<((), ((), ())) as SteppedMigrations>::len(), 0); + assert_eq!(::len(), 3); + assert_eq!(::len(), 6); + + // Check the IDs. The index specific functions all return an Option, + // to account for the out-of-range case. 
+ assert_eq!(::nth_id(0), Some(0u8.encode())); + assert_eq!(::nth_id(1), Some(1u8.encode())); + assert_eq!(::nth_id(2), Some(2u8.encode())); + + sp_io::TestExternalities::default().execute_with(|| { + for n in 0..3 { + ::nth_step( + n, + Default::default(), + &mut WeightMeter::new(), + ); + } + }); + } + + #[test] + fn integrity_test_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<() as SteppedMigrations>::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + assert_ok!(::integrity_test()); + }); + } + + #[test] + fn transactional_rollback_works() { + sp_io::TestExternalities::default().execute_with(|| { + assert_ok!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap()); + assert!(unhashed::exists(&[0])); + + let _g = crate::StorageNoopGuard::new(); + assert!(<(M0, F0) as SteppedMigrations>::nth_transactional_step( + 1, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + assert!(<(F0, M1) as SteppedMigrations>::nth_transactional_step( + 0, + Default::default(), + &mut WeightMeter::new() + ) + .unwrap() + .is_err()); + }); + } +} diff --git a/substrate/frame/support/src/storage/transactional.rs b/substrate/frame/support/src/storage/transactional.rs index d42e1809e91..0671db4a3a8 100644 --- a/substrate/frame/support/src/storage/transactional.rs +++ b/substrate/frame/support/src/storage/transactional.rs @@ -127,6 +127,22 @@ where } } +/// Same as [`with_transaction`] but casts any internal error to `()`. +/// +/// This rids `E` of the `From` bound that is required by `with_transaction`. +pub fn with_transaction_opaque_err(f: F) -> Result, ()> +where + F: FnOnce() -> TransactionOutcome>, +{ + with_transaction(move || -> TransactionOutcome, DispatchError>> { + match f() { + TransactionOutcome::Commit(res) => TransactionOutcome::Commit(Ok(res)), + TransactionOutcome::Rollback(res) => TransactionOutcome::Rollback(Ok(res)), + } + }) + .map_err(|_| ()) +} + /// Same as [`with_transaction`] but without a limit check on nested transactional layers. /// /// This is mostly for backwards compatibility before there was a transactional layer limit. 
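For illustration, a minimal sketch of how the new `with_transaction_opaque_err` helper can be used by a caller such as `SteppedMigration::transactional_step`: storage writes made inside the closure are kept only on `Commit` and discarded on `Rollback`. The function name and storage key below are made up for the example, and the code must run inside externalities (a runtime or a test with `TestExternalities`).

    use frame_support::storage::{transactional::with_transaction_opaque_err, unhashed};
    use sp_runtime::TransactionOutcome;

    /// Try to apply one unit of work; roll every storage change back if it fails.
    fn apply_one_step(fail: bool) -> Result<Result<(), &'static str>, ()> {
        with_transaction_opaque_err(move || {
            // This write is only kept if we commit below.
            unhashed::put(b"example::progress", &1u32);

            if fail {
                // Discard the write above and surface the inner error. Note that the error
                // type does not need to implement `From<DispatchError>` here.
                TransactionOutcome::Rollback(Err("step failed"))
            } else {
                TransactionOutcome::Commit(Ok(()))
            }
        })
        // The outer `Err(())` only signals that no further transactional layer could be opened.
    }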
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 3d0429f71b1..1997d8fc223 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -59,10 +59,10 @@ pub use misc::{ AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, - Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, - TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, - WrapperOpaque, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent, + IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, + SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, + WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -86,7 +86,8 @@ mod hooks; pub use hooks::GenesisBuild; pub use hooks::{ BeforeAllRuntimeMigrations, BuildGenesisConfig, Hooks, IntegrityTest, OnFinalize, OnGenesis, - OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, + OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, OnTimestampSet, PostInherents, + PostTransactions, PreInherents, }; pub mod schedule; diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index c37fb0f54bc..7d0e5aa1e89 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -25,10 +25,74 @@ use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; +use sp_weights::WeightMeter; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; +/// Provides a callback to execute logic before the all inherents. +pub trait PreInherents { + /// Called before all inherents were applied but after `on_initialize`. + fn pre_inherents() {} +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PreInherents for Tuple { + fn pre_inherents() { + for_tuples!( #( Tuple::pre_inherents(); )* ); + } +} + +/// Provides a callback to execute logic after the all inherents. +pub trait PostInherents { + /// Called after all inherents were applied. + fn post_inherents() {} +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PostInherents for Tuple { + fn post_inherents() { + for_tuples!( #( Tuple::post_inherents(); )* ); + } +} + +/// Provides a callback to execute logic before the all transactions. +pub trait PostTransactions { + /// Called after all transactions were applied but before `on_finalize`. 
+ fn post_transactions() {} +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PostTransactions for Tuple { + fn post_transactions() { + for_tuples!( #( Tuple::post_transactions(); )* ); + } +} + +/// Periodically executes logic. Is not guaranteed to run within a specific timeframe and should +/// only be used on logic that has no deadline. +pub trait OnPoll { + /// Code to execute every now and then at the beginning of the block after inherent application. + /// + /// The remaining weight limit must be respected. + fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {} +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl OnPoll for Tuple { + fn on_poll(n: BlockNumber, weight: &mut WeightMeter) { + for_tuples!( #( Tuple::on_poll(n.clone(), weight); )* ); + } +} + /// See [`Hooks::on_initialize`]. pub trait OnInitialize { /// See [`Hooks::on_initialize`]. @@ -374,6 +438,12 @@ pub trait Hooks { Weight::zero() } + /// A hook to run logic after inherent application. + /// + /// Is not guaranteed to execute in a block and should therefore only be used in no-deadline + /// scenarios. + fn on_poll(_n: BlockNumber, _weight: &mut WeightMeter) {} + /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME /// `Executive` pallet. /// diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index eafd9c8abdd..1f634a64282 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -23,6 +23,7 @@ use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, One, Saturating}; use sp_core::bounded::bounded_vec::TruncateFrom; + #[doc(hidden)] pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, @@ -895,11 +896,21 @@ pub trait GetBacking { /// A trait to ensure the inherent are before non-inherent in a block. /// /// This is typically implemented on runtime, through `construct_runtime!`. -pub trait EnsureInherentsAreFirst { +pub trait EnsureInherentsAreFirst: + IsInherent<::Extrinsic> +{ /// Ensure the position of inherent is correct, i.e. they are before non-inherents. /// - /// On error return the index of the inherent with invalid position (counting from 0). - fn ensure_inherents_are_first(block: &Block) -> Result<(), u32>; + /// On error return the index of the inherent with invalid position (counting from 0). On + /// success it returns the index of the last inherent. `0` therefore means that there are no + /// inherents. + fn ensure_inherents_are_first(block: &Block) -> Result; +} + +/// A trait to check if an extrinsic is an inherent. +pub trait IsInherent { + /// Whether this extrinsic is an inherent. + fn is_inherent(ext: &Extrinsic) -> bool; } /// An extrinsic on which we can get access to call. 
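To make the new contract concrete: `EnsureInherentsAreFirst::ensure_inherents_are_first` now returns the number of leading inherents on success instead of `()`, and the index of the first misplaced inherent on failure. The `construct_runtime` expansion earlier in this patch implements exactly this loop over `block.extrinsics()` via `IsInherent`; the standalone sketch below mirrors that logic, with the extrinsic type reduced to a plain slice element and `is_inherent` passed in as a closure.

    /// Returns `Ok(number_of_leading_inherents)` or `Err(index_of_first_misplaced_inherent)`.
    fn ensure_inherents_are_first<Xt>(
        extrinsics: &[Xt],
        is_inherent: impl Fn(&Xt) -> bool,
    ) -> Result<u32, u32> {
        let mut num_inherents = 0u32;

        for (i, xt) in extrinsics.iter().enumerate() {
            if is_inherent(xt) {
                // An inherent that comes after the first non-inherent is reported by index.
                if num_inherents != i as u32 {
                    return Err(i as u32)
                }
                num_inherents += 1;
            }
        }

        // `Ok(0)` therefore means the block contains no inherents at all.
        Ok(num_inherents)
    }

    #[test]
    fn inherents_must_form_a_prefix_of_the_block() {
        // `true` marks an inherent.
        assert_eq!(ensure_inherents_are_first(&[true, true, false], |x| *x), Ok(2));
        assert_eq!(ensure_inherents_are_first(&[true, false, true], |x| *x), Err(2));
    }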
diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index a4c7ecf7865..30005c07cb6 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -53,8 +53,9 @@ error[E0599]: no function or associated item named `is_inherent` found for struc | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `is_inherent`, perhaps you need to implement it: + = note: the following traits define an item `is_inherent`, perhaps you need to implement one of them: candidate #1: `ProvideInherent` + candidate #2: `IsInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope @@ -119,3 +120,23 @@ error[E0599]: no function or associated item named `is_inherent_required` found = note: the following trait defines an item `is_inherent_required`, perhaps you need to implement it: candidate #1: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:70:3 + | +70 | Pallet: pallet expanded::{}::{Pallet, Inherent}, + | ^^^^^^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + +error[E0277]: the trait bound `pallet::Pallet: ProvideInherent` is not satisfied + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 + | +66 | / construct_runtime! 
{ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } + | |_^ the trait `ProvideInherent` is not implemented for `pallet::Pallet` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 79e9d678671..33b96dea948 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_explicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 4bd8ee0bb39..db006fe7935 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -90,6 +90,7 @@ fn module_error_outer_enum_expand_implicit() { frame_system::Error::NonDefaultComposite => (), frame_system::Error::NonZeroRefCount => (), frame_system::Error::CallFiltered => (), + frame_system::Error::MultiBlockMigrationsOngoing => (), #[cfg(feature = "experimental")] frame_system::Error::InvalidTask => (), #[cfg(feature = "experimental")] diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index bb7f7d2822e..40e70b219ba 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -101,7 +101,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } @@ -200,8 +200,8 @@ fn runtime_metadata() { name: "header", ty: meta_type::<&::Header>(), }], - output: meta_type::<()>(), - docs: maybe_docs(vec![" Initialize a block with the given header."]), + output: meta_type::(), + docs: maybe_docs(vec![" Initialize a block with the given header and return the runtime executive mode."]), }, ], docs: maybe_docs(vec![ diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 01df09106d9..3b659916379 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -130,11 +130,13 @@ use frame_support::{ DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, PostDispatchInfo, }, ensure, impl_ensure_origin_with_arg_ignoring_arg, + migrations::MultiStepMigrator, pallet_prelude::Pays, storage::{self, StorageStreamIter}, traits::{ ConstU32, Contains, EnsureOrigin, EnsureOriginWithArg, Get, HandleLifetime, - OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, + OnKilledAccount, OnNewAccount, OnRuntimeUpgrade, OriginTrait, PalletInfo, SortedMembers, + StoredMap, TypedGet, }, Parameter, }; @@ -169,6 +171,7 @@ pub use extensions::{ // Backward compatible re-export. 
pub use extensions::check_mortality::CheckMortality as CheckEra; pub use frame_support::dispatch::RawOrigin; +use frame_support::traits::{PostInherents, PostTransactions, PreInherents}; pub use weights::WeightInfo; const LOG_TARGET: &str = "runtime::system"; @@ -299,6 +302,11 @@ pub mod pallet { type BaseCallFilter = frame_support::traits::Everything; type BlockHashCount = frame_support::traits::ConstU64<10>; type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } /// Default configurations of this pallet in a solo-chain environment. @@ -393,6 +401,11 @@ pub mod pallet { /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); } /// Default configurations of this pallet in a relay-chain environment. @@ -572,6 +585,35 @@ pub mod pallet { /// The maximum number of consumers allowed on a single account. type MaxConsumers: ConsumerLimits; + + /// All migrations that should run in the next runtime upgrade. + /// + /// These used to be formerly configured in `Executive`. Parachains need to ensure that + /// running all these migrations in one block will not overflow the weight limit of a block. + /// The migrations are run *before* the pallet `on_runtime_upgrade` hooks, just like the + /// `OnRuntimeUpgrade` migrations. + type SingleBlockMigrations: OnRuntimeUpgrade; + + /// The migrator that is used to run Multi-Block-Migrations. + /// + /// Can be set to [`pallet-migrations`] or an alternative implementation of the interface. + /// The diagram in `frame_executive::block_flowchart` explains when it runs. + type MultiBlockMigrator: MultiStepMigrator; + + /// A callback that executes in *every block* directly before all inherents were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PreInherents: PreInherents; + + /// A callback that executes in *every block* directly after all inherents were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PostInherents: PostInherents; + + /// A callback that executes in *every block* directly after all transactions were applied. + /// + /// See `frame_executive::block_flowchart` for a in-depth explanation when it runs. + type PostTransactions: PostTransactions; } #[pallet::pallet] @@ -619,6 +661,9 @@ pub mod pallet { } /// Set the new runtime code without doing any checks of the given `code`. + /// + /// Note that runtime upgrades will not run if this is called with a not-increasing spec + /// version! #[pallet::call_index(3)] #[pallet::weight((T::SystemWeightInfo::set_code(), DispatchClass::Operational))] pub fn set_code_without_checks( @@ -814,6 +859,8 @@ pub mod pallet { NonZeroRefCount, /// The origin filter prevent the call to be dispatched. CallFiltered, + /// A multi-block migration is ongoing and prevents the current code from being replaced. + MultiBlockMigrationsOngoing, #[cfg(feature = "experimental")] /// The specified [`Task`] is not valid. InvalidTask, @@ -845,6 +892,10 @@ pub mod pallet { #[pallet::storage] pub(super) type ExtrinsicCount = StorageValue<_, u32>; + /// Whether all inherents have been applied. + #[pallet::storage] + pub type InherentsApplied = StorageValue<_, bool, ValueQuery>; + /// The current weight for the block. 
#[pallet::storage] #[pallet::whitelist_storage] @@ -1373,6 +1424,19 @@ impl Pallet { Self::deposit_event(Event::CodeUpdated); } + /// Whether all inherents have been applied. + pub fn inherents_applied() -> bool { + InherentsApplied::::get() + } + + /// Note that all inherents have been applied. + /// + /// Should be called immediately after all inherents have been applied. Must be called at least + /// once per block. + pub fn note_inherents_applied() { + InherentsApplied::::put(true); + } + /// Increment the reference counter on an account. #[deprecated = "Use `inc_consumers` instead"] pub fn inc_ref(who: &T::AccountId) { @@ -1692,6 +1756,7 @@ impl Pallet { >::put(digest); >::put(parent_hash); >::insert(*number - One::one(), parent_hash); + >::kill(); // Remove previous block data from storage BlockWeight::::kill(); @@ -1738,6 +1803,7 @@ impl Pallet { ExecutionPhase::::kill(); AllExtrinsicsLen::::kill(); storage::unhashed::kill(well_known_keys::INTRABLOCK_ENTROPY); + InherentsApplied::::kill(); // The following fields // @@ -1981,10 +2047,14 @@ impl Pallet { /// Determine whether or not it is possible to update the code. /// - /// Checks the given code if it is a valid runtime wasm blob by instantianting + /// Checks the given code if it is a valid runtime wasm blob by instantiating /// it and extracting the runtime version of it. It checks that the runtime version /// of the old and new runtime has the same spec name and that the spec version is increasing. pub fn can_set_code(code: &[u8]) -> Result<(), sp_runtime::DispatchError> { + if T::MultiBlockMigrator::ongoing() { + return Err(Error::::MultiBlockMigrationsOngoing.into()) + } + let current_version = T::Version::get(); let new_version = sp_io::misc::runtime_version(code) .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index c4108099e39..7059845a7df 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -86,6 +86,22 @@ impl Config for Test { type Version = Version; type AccountData = u32; type OnKilledAccount = RecordKilled; + type MultiBlockMigrator = MockedMigrator; +} + +parameter_types! 
{ + pub static Ongoing: bool = false; +} + +pub struct MockedMigrator; +impl frame_support::migrations::MultiStepMigrator for MockedMigrator { + fn ongoing() -> bool { + Ongoing::get() + } + + fn step() -> Weight { + Weight::zero() + } } pub type SysEvent = frame_system::Event; diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index e437e7f9f39..b889b5ca046 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -675,6 +675,28 @@ fn set_code_with_real_wasm_blob() { }); } +#[test] +fn set_code_rejects_during_mbm() { + Ongoing::set(true); + + let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + let res = System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), + ); + assert_eq!( + res, + Err(DispatchErrorWithPostInfo::from(Error::::MultiBlockMigrationsOngoing)) + ); + + assert!(System::events().is_empty()); + }); +} + #[test] fn set_code_via_authorization_works() { let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 2b1e65ec885..e34e4c0e767 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -456,6 +456,7 @@ impl<'a> ToClientSideDecl<'a> { |err| #crate_::ApiError::FailedToDecodeReturnValue { function: #function_name, error: err, + raw: r.clone(), } ) ) diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c1339ff6621..1761e0ac9db 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -158,7 +158,7 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result::Hash, _: &<#block_type as #crate_::BlockT>::Header, - ) -> std::result::Result<(), #crate_::ApiError> { + ) -> std::result::Result<#crate_::__private::ExtrinsicInclusionMode, #crate_::ApiError> { unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 190de1ab3fd..a945b9f21f3 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -101,7 +101,7 @@ pub mod __private { generic::BlockId, traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, + ExtrinsicInclusionMode, RuntimeString, TransactionOutcome, }; pub use sp_std::{mem, slice, vec}; pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; @@ -115,11 +115,11 @@ pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; -use sp_runtime::traits::Block as BlockT; #[cfg(feature = "std")] use sp_runtime::traits::HashingFor; #[cfg(feature = "std")] pub use sp_runtime::TransactionOutcome; +use sp_runtime::{traits::Block as BlockT, ExtrinsicInclusionMode}; #[cfg(feature = "std")] pub use sp_state_machine::StorageProof; #[cfg(feature = "std")] @@ 
-280,7 +280,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// ```rust /// use sp_version::create_runtime_str; /// # -/// # use sp_runtime::traits::Block as BlockT; +/// # use sp_runtime::{ExtrinsicInclusionMode, traits::Block as BlockT}; /// # use sp_test_primitives::Block; /// # /// # /// The declaration of the `Runtime` type is done by the `construct_runtime!` macro @@ -307,7 +307,9 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// # unimplemented!() /// # } /// # fn execute_block(_block: Block) {} -/// # fn initialize_block(_header: &::Header) {} +/// # fn initialize_block(_header: &::Header) -> ExtrinsicInclusionMode { +/// # unimplemented!() +/// # } /// # } /// /// impl self::Balance for Runtime { @@ -540,11 +542,12 @@ pub fn init_runtime_logger() { #[cfg(feature = "std")] #[derive(Debug, thiserror::Error)] pub enum ApiError { - #[error("Failed to decode return value of {function}")] + #[error("Failed to decode return value of {function}: {error} raw data: {raw:?}")] FailedToDecodeReturnValue { function: &'static str, #[source] error: codec::Error, + raw: Vec, }, #[error("Failed to convert return value from runtime to node of {function}")] FailedToConvertReturnValue { @@ -800,15 +803,18 @@ pub fn deserialize_runtime_api_info(bytes: [u8; RUNTIME_API_INFO_SIZE]) -> ([u8; decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(4)] + #[api_version(5)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; /// Execute the given block. fn execute_block(block: Block); /// Initialize a block with the given header. + #[changed_in(5)] #[renamed("initialise_block", 2)] fn initialize_block(header: &::Header); + /// Initialize a block with the given header and return the runtime executive mode. + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode; } /// The `Metadata` api trait that returns metadata for the runtime. diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index d68470551d2..211a08561fd 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -139,7 +139,7 @@ impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs index 43718e4cd04..262a874213a 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.rs @@ -40,7 +40,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs index 560257b5168..58850ab343f 100644 --- a/substrate/primitives/api/test/tests/ui/impl_missing_version.rs +++ b/substrate/primitives/api/test/tests/ui/impl_missing_version.rs @@ -45,7 +45,7 @@ sp_api::impl_runtime_apis! 
{ fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs index 6ead545f85a..70f75d06515 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -44,7 +44,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs index 8eebc1d79ba..63032000040 100644 --- a/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs +++ b/substrate/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -47,7 +47,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs index 594556d57be..0858813bc99 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/custom_where_bound.rs @@ -51,7 +51,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs index ae573238ffe..3e0cb79156c 100644 --- a/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs +++ b/substrate/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -46,7 +46,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs index 921bf0d0435..b2caea7ab7e 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.rs @@ -42,7 +42,7 @@ sp_api::impl_runtime_apis! { fn execute_block(_: Block) { unimplemented!() } - fn initialize_block(_: &::Header) { + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { unimplemented!() } } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index ddf92554c83..44bf3c969e5 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -998,6 +998,16 @@ impl TransactionOutcome { } } +/// Confines the kind of extrinsics that can be included in a block. 
+#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Encode, Decode, TypeInfo)] +pub enum ExtrinsicInclusionMode { + /// All extrinsics are allowed to be included in this block. + #[default] + AllExtrinsics, + /// Inherents are allowed to be included. + OnlyInherents, +} + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index db9ff187b70..63e0aa6e137 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -61,7 +61,7 @@ use sp_runtime::{ create_runtime_str, impl_opaque_keys, traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResult, Perbill, + ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; @@ -480,9 +480,9 @@ impl_runtime_apis! { Executive::execute_block(block); } - fn initialize_block(header: &::Header) { + fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode { log::trace!(target: LOG_TARGET, "initialize_block: {header:#?}"); - Executive::initialize_block(header); + Executive::initialize_block(header) } } -- GitLab From 833bafdbf7b178b639f895e0d43c43eb502de30c Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 29 Feb 2024 01:43:53 +0200 Subject: [PATCH 20/86] Fixup multi-collator parachain transition to async backing (#3510) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixing: ``` Verification failed for block 0x07bbf1e04121d70a4bdb21cc055132b53ac2390fa95c4d05497fc91b1e8bf7f5 received from (12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ): "Header 0x07bbf1e04121d70a4bdb21cc055132b53ac2390fa95c4d05497fc91b1e8bf7f5 rejected: too far in the future" ``` --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Dmitry Sinyavin Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: Bastian KΓΆcher --- cumulus/client/consensus/aura/src/lib.rs | 5 ++- cumulus/polkadot-parachain/src/service.rs | 33 +++++++++++-------- prdoc/pr_3510.prdoc | 13 ++++++++ .../bin/node-template/node/src/service.rs | 26 +++++++++------ 4 files changed, 52 insertions(+), 25 deletions(-) create mode 100644 prdoc/pr_3510.prdoc diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs index 8e4bc658e44..ed6f5bdd4d6 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -54,7 +54,10 @@ use std::{ mod import_queue; pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, ImportQueueParams}; -pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}; +pub use sc_consensus_aura::{ + slot_duration, standalone::slot_duration_at, AuraVerifier, BuildAuraWorkerParams, + SlotProportion, +}; pub use sc_consensus_slots::InherentDataProviderExt; pub mod collator; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 4e06cd38f1d..386551102e4 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -1251,28 +1251,33 @@ where <::Pair as Pair>::Signature: TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - let client2 = client.clone(); + let verifier_client = client.clone(); 
let aura_verifier = move || { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client2).unwrap(); - Box::new(cumulus_client_consensus_aura::build_verifier::< ::Pair, _, _, _, >(cumulus_client_consensus_aura::BuildVerifierParams { - client: client2.clone(), - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + + Ok((slot, timestamp)) + } }, telemetry: telemetry_handle, })) as Box<_> diff --git a/prdoc/pr_3510.prdoc b/prdoc/pr_3510.prdoc new file mode 100644 index 00000000000..6ee2f9a81f7 --- /dev/null +++ b/prdoc/pr_3510.prdoc @@ -0,0 +1,13 @@ +title: "Fix multi-collator parachain transition to async backing" + +doc: + - audience: Node Operator + description: | + The dynamic Aura slot duration, introduced in PR#3211, didn't take the block import pipeline + into account. The result was the parachain backed by multiple collators not being able to + keep producing blocks after its runtime was upgraded to support async backing, requiring to + restart all the collator nodes. This change fixes the issue, introducing the dynamic Aura + slot duration into the block import pipeline. 
+ +crates: + - name: "polkadot-parachain-bin" diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs index 25cd6511784..125cca13927 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/substrate/bin/node-template/node/src/service.rs @@ -80,23 +80,29 @@ pub fn new_partial(config: &Configuration) -> Result { telemetry.as_ref().map(|x| x.handle()), )?; - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - + let cidp_client = client.clone(); let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = cidp_client.clone(); + async move { + let slot_duration = sc_consensus_aura::standalone::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); - Ok((slot, timestamp)) + Ok((slot, timestamp)) + } }, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), -- GitLab From a22319cdd5fcdcec3bbedfe41f6d11a6b5446cf9 Mon Sep 17 00:00:00 2001 From: philoniare Date: Thu, 29 Feb 2024 13:17:24 +0800 Subject: [PATCH 21/86] [Deprecation] Remove sp_weights::OldWeight (#3491) # Description *Removes `sp_weights::OldWeight` and its usage* Fixes #144 --------- Co-authored-by: Liam Aharon --- prdoc/pr_3491.prdoc | 12 +++ substrate/frame/contracts/src/wasm/mod.rs | 11 --- substrate/primitives/weights/src/lib.rs | 27 +------ .../primitives/weights/src/weight_meter.rs | 81 ++++++++----------- 4 files changed, 47 insertions(+), 84 deletions(-) create mode 100644 prdoc/pr_3491.prdoc diff --git a/prdoc/pr_3491.prdoc b/prdoc/pr_3491.prdoc new file mode 100644 index 00000000000..e36afb916a6 --- /dev/null +++ b/prdoc/pr_3491.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove Deprecated OldWeight + +doc: + - audience: Runtime Dev + description: | + Removed deprecated sp_weights::OldWeight type. Use [`weight_v2::Weight`] instead. + +crates: + - name: frame-support diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index c96c2856509..34b45cba5d2 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -1821,17 +1821,6 @@ mod tests { assert!(weight_left.all_gt(actual_left), "gas_left must be greater than final"); } - /// Test that [`frame_support::weights::OldWeight`] en/decodes the same as our - /// [`crate::OldWeight`]. 
- #[test] - fn old_weight_decode() { - #![allow(deprecated)] - let sp = frame_support::weights::OldWeight(42).encode(); - let our = crate::OldWeight::decode(&mut &*sp).unwrap(); - - assert_eq!(our, 42); - } - const CODE_VALUE_TRANSFERRED: &str = r#" (module (import "seal0" "seal_value_transferred" (func $seal_value_transferred (param i32 i32))) diff --git a/substrate/primitives/weights/src/lib.rs b/substrate/primitives/weights/src/lib.rs index aede9473535..b2c956266e2 100644 --- a/substrate/primitives/weights/src/lib.rs +++ b/substrate/primitives/weights/src/lib.rs @@ -18,9 +18,6 @@ //! # Primitives for transaction weighting. #![cfg_attr(not(feature = "std"), no_std)] -// TODO remove once `OldWeight` is gone. I dont know why this is needed, maybe by one of the macros -// of `OldWeight`. -#![allow(deprecated)] extern crate self as sp_weights; @@ -28,7 +25,7 @@ mod weight_meter; mod weight_v2; use bounded_collections::Get; -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -52,28 +49,6 @@ pub mod constants { pub const WEIGHT_PROOF_SIZE_PER_KB: u64 = 1024; } -/// The old weight type. -/// -/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other -/// cases. -#[derive( - Decode, - Encode, - CompactAs, - PartialEq, - Eq, - Clone, - Copy, - RuntimeDebug, - Default, - MaxEncodedLen, - TypeInfo, -)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "serde", serde(transparent))] -#[deprecated(note = "Will be removed soon; use `Weight` instead.")] -pub struct OldWeight(pub u64); - /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/substrate/primitives/weights/src/weight_meter.rs b/substrate/primitives/weights/src/weight_meter.rs index 1738948e4c3..cfe8396ae6d 100644 --- a/substrate/primitives/weights/src/weight_meter.rs +++ b/substrate/primitives/weights/src/weight_meter.rs @@ -118,13 +118,6 @@ impl WeightMeter { debug_assert!(self.consumed.all_lte(self.limit), "Weight counter overflow"); } - /// Consume the given weight after checking that it can be consumed and return `true`. Otherwise - /// do nothing and return `false`. - #[deprecated(note = "Use `try_consume` instead. Will be removed after December 2023.")] - pub fn check_accrue(&mut self, w: Weight) -> bool { - self.try_consume(w).is_ok() - } - /// Consume the given weight after checking that it can be consumed. /// /// Returns `Ok` if the weight can be consumed or otherwise an `Err`. @@ -139,12 +132,6 @@ impl WeightMeter { }) } - /// Check if the given weight can be consumed. - #[deprecated(note = "Use `can_consume` instead. Will be removed after December 2023.")] - pub fn can_accrue(&self, w: Weight) -> bool { - self.can_consume(w) - } - /// Check if the given weight can be consumed. 
pub fn can_consume(&self, w: Weight) -> bool { self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) @@ -165,80 +152,80 @@ mod tests { fn weight_meter_remaining_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(5, 0)); assert_eq!(meter.remaining(), Weight::from_parts(5, 20)); - assert!(meter.check_accrue(Weight::from_parts(2, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(7, 10)); assert_eq!(meter.remaining(), Weight::from_parts(3, 10)); - assert!(meter.check_accrue(Weight::from_parts(3, 10))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 10)), Ok(())); assert_eq!(meter.consumed, Weight::from_parts(10, 20)); assert_eq!(meter.remaining(), Weight::from_parts(0, 0)); } #[test] - fn weight_meter_can_accrue_works() { + fn weight_meter_can_consume_works() { let meter = WeightMeter::with_limit(Weight::from_parts(1, 1)); - assert!(meter.can_accrue(Weight::from_parts(0, 0))); - assert!(meter.can_accrue(Weight::from_parts(1, 1))); - assert!(!meter.can_accrue(Weight::from_parts(0, 2))); - assert!(!meter.can_accrue(Weight::from_parts(2, 0))); - assert!(!meter.can_accrue(Weight::from_parts(2, 2))); + assert!(meter.can_consume(Weight::from_parts(0, 0))); + assert!(meter.can_consume(Weight::from_parts(1, 1))); + assert!(!meter.can_consume(Weight::from_parts(0, 2))); + assert!(!meter.can_consume(Weight::from_parts(2, 0))); + assert!(!meter.can_consume(Weight::from_parts(2, 2))); } #[test] - fn weight_meter_check_accrue_works() { + fn weight_meter_try_consume_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(2, 2)); - assert!(meter.check_accrue(Weight::from_parts(0, 0))); - assert!(meter.check_accrue(Weight::from_parts(1, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 2))); - assert!(!meter.check_accrue(Weight::from_parts(2, 0))); - assert!(!meter.check_accrue(Weight::from_parts(2, 2))); - assert!(meter.check_accrue(Weight::from_parts(0, 1))); - assert!(meter.check_accrue(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 0)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(2, 2)), Err(())); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Ok(())); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Ok(())); } #[test] - fn weight_meter_check_and_can_accrue_works() { + fn weight_meter_check_and_can_consume_works() { let mut meter = WeightMeter::new(); - assert!(meter.can_accrue(Weight::from_parts(u64::MAX, 0))); - assert!(meter.check_accrue(Weight::from_parts(u64::MAX, 0))); + assert!(meter.can_consume(Weight::from_parts(u64::MAX, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(u64::MAX, 0)), Ok(())); - assert!(meter.can_accrue(Weight::from_parts(0, u64::MAX))); - assert!(meter.check_accrue(Weight::from_parts(0, u64::MAX))); + assert!(meter.can_consume(Weight::from_parts(0, u64::MAX))); + assert_eq!(meter.try_consume(Weight::from_parts(0, u64::MAX)), Ok(())); - assert!(!meter.can_accrue(Weight::from_parts(0, 1))); - assert!(!meter.check_accrue(Weight::from_parts(0, 1))); + assert!(!meter.can_consume(Weight::from_parts(0, 
1))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 1)), Err(())); - assert!(!meter.can_accrue(Weight::from_parts(1, 0))); - assert!(!meter.check_accrue(Weight::from_parts(1, 0))); + assert!(!meter.can_consume(Weight::from_parts(1, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(1, 0)), Err(())); - assert!(meter.can_accrue(Weight::zero())); - assert!(meter.check_accrue(Weight::zero())); + assert!(meter.can_consume(Weight::zero())); + assert_eq!(meter.try_consume(Weight::zero()), Ok(())); } #[test] fn consumed_ratio_works() { let mut meter = WeightMeter::with_limit(Weight::from_parts(10, 20)); - assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(5, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); - assert!(meter.check_accrue(Weight::from_parts(0, 12))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 12)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(60)); - assert!(meter.check_accrue(Weight::from_parts(2, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(2, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(70)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(80)); - assert!(meter.check_accrue(Weight::from_parts(3, 0))); + assert_eq!(meter.try_consume(Weight::from_parts(3, 0)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); - assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.try_consume(Weight::from_parts(0, 4)), Ok(())); assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); } -- GitLab From a035dc9be718c6327420c87fecb7a752954492f4 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Thu, 29 Feb 2024 09:12:02 +0200 Subject: [PATCH 22/86] Remove `AssignmentProviderConfig` and use parameters from `HostConfiguration` instead (#3181) This PR removes `AssignmentProviderConfig` and uses the corresponding ondemand parameters from `HostConfiguration` instead. Additionally `scheduling_lookahead` and all coretime/ondemand related parameters are extracted in a separate struct - `SchedulerParams`. The most relevant commit from the PR is [this one](https://github.com/paritytech/polkadot-sdk/pull/3181/commits/830bc0f5e858944474171bbe33382ad96040b535). 
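For readers skimming the series, a rough sketch of what the call-site change amounts to is shown below. This is not the actual pallet code: the stand-in structs are simplified (no generics, no pallet storage) and the field values are arbitrary; it only illustrates that the scheduling/on-demand knobs formerly fetched per core via `AssignmentProviderConfig` are now read from the `scheduler_params` field of the active `HostConfiguration`.

```rust
// Minimal, self-contained sketch of the access-pattern change described above. The real types
// live in `polkadot/primitives/src/vstaging/mod.rs` and
// `polkadot/runtime/parachains/src/configuration.rs`; they are reduced here to plain structs
// purely for illustration, with made-up example values.

/// Simplified stand-in for `SchedulerParams<BlockNumber>`.
#[derive(Clone, Debug, Default)]
struct SchedulerParams {
    group_rotation_frequency: u32,
    paras_availability_period: u32,
    lookahead: u32,
    num_cores: u32,
    max_availability_timeouts: u32,
    ttl: u32,
}

/// Simplified stand-in for `HostConfiguration<BlockNumber>` after this change: the scheduling
/// and on-demand parameters now sit behind a single `scheduler_params` field.
#[derive(Clone, Debug, Default)]
struct HostConfiguration {
    max_code_size: u32,
    scheduler_params: SchedulerParams,
}

fn main() {
    let config = HostConfiguration {
        max_code_size: 3 * 1024 * 1024,
        scheduler_params: SchedulerParams {
            group_rotation_frequency: 20,
            paras_availability_period: 4,
            lookahead: 2,
            num_cores: 1,
            max_availability_timeouts: 0,
            ttl: 5,
        },
    };

    // Previously a call site would fetch `AssignmentProviderConfig { max_availability_timeouts, ttl }`
    // per core; now the equivalent values are read from the host configuration directly.
    let ttl = config.scheduler_params.ttl;
    let timeouts = config.scheduler_params.max_availability_timeouts;
    assert_eq!((ttl, timeouts), (5, 0));
}
```

In the real runtime these values are of course obtained through `configuration::Pallet::<T>::config()` rather than constructed by hand, as the hunks below show.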
Fixes https://github.com/paritytech/polkadot-sdk/issues/2268 --------- Co-authored-by: command-bot <> --- .../zombienet/tests/0002-pov_recovery.toml | 2 +- polkadot/node/service/src/chain_spec.rs | 16 +- polkadot/node/test/service/src/chain_spec.rs | 11 +- polkadot/primitives/src/vstaging/mod.rs | 76 ++++ .../implementers-guide/src/types/runtime.md | 118 +----- .../src/assigner_coretime/mock_helpers.rs | 11 +- .../parachains/src/assigner_coretime/mod.rs | 16 +- .../src/assigner_on_demand/benchmarking.rs | 2 +- .../src/assigner_on_demand/mock_helpers.rs | 11 +- .../parachains/src/assigner_on_demand/mod.rs | 18 +- .../parachains/src/assigner_parachains.rs | 12 +- .../src/assigner_parachains/mock_helpers.rs | 11 +- polkadot/runtime/parachains/src/builder.rs | 19 +- .../runtime/parachains/src/configuration.rs | 133 +++---- .../src/configuration/benchmarking.rs | 2 + .../parachains/src/configuration/migration.rs | 1 + .../src/configuration/migration/v11.rs | 113 +++++- .../src/configuration/migration/v12.rs | 349 ++++++++++++++++++ .../parachains/src/configuration/tests.rs | 51 ++- .../parachains/src/coretime/migration.rs | 19 +- .../runtime/parachains/src/coretime/mod.rs | 4 +- .../runtime/parachains/src/inclusion/tests.rs | 10 +- polkadot/runtime/parachains/src/mock.rs | 23 +- .../runtime/parachains/src/paras/tests.rs | 7 +- .../parachains/src/paras_inherent/tests.rs | 11 +- polkadot/runtime/parachains/src/scheduler.rs | 43 +-- .../parachains/src/scheduler/common.rs | 19 +- .../runtime/parachains/src/scheduler/tests.rs | 81 ++-- .../parachains/src/session_info/tests.rs | 4 +- polkadot/runtime/rococo/src/lib.rs | 1 + .../runtime_parachains_configuration.rs | 50 ++- polkadot/runtime/westend/src/lib.rs | 1 + .../runtime_parachains_configuration.rs | 50 ++- .../functional/0002-parachains-disputes.toml | 4 +- .../0004-parachains-garbage-candidate.toml | 4 +- ...0005-parachains-disputes-past-session.toml | 4 +- .../0006-parachains-max-tranche0.toml | 4 +- .../0007-dispute-freshly-finalized.toml | 4 +- .../0008-dispute-old-finalized.toml | 4 +- .../functional/0010-validator-disabling.toml | 4 +- .../0011-async-backing-6-seconds-rate.toml | 7 +- .../functional/0012-elastic-scaling-mvp.toml | 6 +- .../zombienet_tests/misc/0001-paritydb.toml | 4 +- 43 files changed, 901 insertions(+), 439 deletions(-) create mode 100644 polkadot/runtime/parachains/src/configuration/migration/v12.rs diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index fe42fd4b2f6..15a61eba2a0 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -4,7 +4,7 @@ default_command = "polkadot" chain = "rococo-local" -[relaychain.genesis.runtimeGenesis.patch.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] # set parameters such that collators only connect to 1 validator as a backing group max_validators_per_core = 1 group_rotation_frequency = 100 # 10 mins diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index af241d1cbc5..af05af87a46 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -24,6 +24,8 @@ use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +#[cfg(any(feature = "rococo-native", feature = "westend-native",))] +use 
polkadot_primitives::vstaging::SchedulerParams; #[cfg(feature = "rococo-native")] use rococo_runtime as rococo; #[cfg(feature = "rococo-native")] @@ -129,8 +131,6 @@ fn default_parachains_host_configuration( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, max_downward_message_size: 1024 * 1024, @@ -151,11 +151,16 @@ fn default_parachains_host_configuration( relay_vrf_modulo_samples: 2, zeroth_delay_tranche_width: 0, minimum_validation_upgrade_delay: 5, - scheduling_lookahead: 2, async_backing_params: AsyncBackingParams { max_candidate_depth: 3, allowed_ancestry_len: 2, }, + scheduler_params: SchedulerParams { + lookahead: 2, + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() } } @@ -891,7 +896,10 @@ pub fn rococo_testnet_genesis( "sudo": { "key": Some(root_key.clone()) }, "configuration": { "config": polkadot_runtime_parachains::configuration::HostConfiguration { - max_validators_per_core: Some(1), + scheduler_params: SchedulerParams { + max_validators_per_core: Some(1), + ..default_parachains_host_configuration().scheduler_params + }, ..default_parachains_host_configuration() }, }, diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 4cc387317c3..f14fa9fde58 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -19,7 +19,9 @@ use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; -use polkadot_primitives::{AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE}; +use polkadot_primitives::{ + vstaging::SchedulerParams, AccountId, AssignmentId, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, +}; use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; @@ -165,11 +167,14 @@ fn polkadot_testnet_genesis( max_code_size: MAX_CODE_SIZE, max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, max_downward_message_size: 1024, + scheduler_params: SchedulerParams { + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, ..Default::default() }, } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 39d9dfc02c5..bd2a5106c44 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -23,6 +23,7 @@ use sp_std::prelude::*; use parity_scale_codec::{Decode, Encode}; use primitives::RuntimeDebug; use scale_info::TypeInfo; +use sp_arithmetic::Perbill; /// Approval voting configuration parameters #[derive( @@ -50,6 +51,81 @@ impl Default for ApprovalVotingParams { } } +/// Scheduler configuration parameters. All coretime/ondemand parameters are here. +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct SchedulerParams { + /// How often parachain groups should be rotated across parachains. + /// + /// Must be non-zero. + pub group_rotation_frequency: BlockNumber, + /// Availability timeout for a block on a core, measured in blocks. 
+ /// + /// This is the maximum amount of blocks after a core became occupied that validators have time + /// to make the block available. + /// + /// This value only has effect on group rotations. If backers backed something at the end of + /// their rotation, the occupied core affects the backing group that comes afterwards. We limit + /// the effect one backing group can have on the next to `paras_availability_period` blocks. + /// + /// Within a group rotation there is no timeout as backers are only affecting themselves. + /// + /// Must be at least 1. With a value of 1, the previous group will not be able to negatively + /// affect the following group at the expense of a tight availability timeline at group + /// rotation boundaries. + pub paras_availability_period: BlockNumber, + /// The maximum number of validators to have per core. + /// + /// `None` means no maximum. + pub max_validators_per_core: Option, + /// The amount of blocks ahead to schedule paras. + pub lookahead: u32, + /// How many cores are managed by the coretime chain. + pub num_cores: u32, + /// The max number of times a claim can time out in availability. + pub max_availability_timeouts: u32, + /// The maximum queue size of the pay as you go module. + pub on_demand_queue_max_size: u32, + /// The target utilization of the spot price queue in percentages. + pub on_demand_target_queue_utilization: Perbill, + /// How quickly the fee rises in reaction to increased utilization. + /// The lower the number the slower the increase. + pub on_demand_fee_variability: Perbill, + /// The minimum amount needed to claim a slot in the spot pricing queue. + pub on_demand_base_fee: Balance, + /// The number of blocks a claim stays in the scheduler's claimqueue before getting cleared. + /// This number should go reasonably higher than the number of blocks in the async backing + /// lookahead. + pub ttl: BlockNumber, +} + +impl> Default for SchedulerParams { + fn default() -> Self { + Self { + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + max_validators_per_core: Default::default(), + lookahead: 1, + num_cores: Default::default(), + max_availability_timeouts: Default::default(), + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_base_fee: 10_000_000u128, + ttl: 5u32.into(), + } + } +} + use bitvec::vec::BitVec; /// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. diff --git a/polkadot/roadmap/implementers-guide/src/types/runtime.md b/polkadot/roadmap/implementers-guide/src/types/runtime.md index 4b97409f8df..1dfabd96db7 100644 --- a/polkadot/roadmap/implementers-guide/src/types/runtime.md +++ b/polkadot/roadmap/implementers-guide/src/types/runtime.md @@ -4,106 +4,24 @@ Types used within the runtime exclusively and pervasively. ## Host Configuration -The internal-to-runtime configuration of the parachain host. This is expected to be altered only by governance procedures. - -```rust -struct HostConfiguration { - /// The minimum period, in blocks, between which parachains can update their validation code. - pub validation_upgrade_cooldown: BlockNumber, - /// The delay, in blocks, before a validation upgrade is applied. - pub validation_upgrade_delay: BlockNumber, - /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes - /// have concluded. 
- pub code_retention_period: BlockNumber, - /// The maximum validation code size, in bytes. - pub max_code_size: u32, - /// The maximum head-data size, in bytes. - pub max_head_data_size: u32, - /// The amount of availability cores to dedicate to parathreads (on-demand parachains). - pub parathread_cores: u32, - /// The number of retries that a parathread (on-demand parachain) author has to submit their block. - pub parathread_retries: u32, - /// How often parachain groups should be rotated across parachains. - pub group_rotation_frequency: BlockNumber, - /// The availability period, in blocks, for parachains. This is the amount of blocks - /// after inclusion that validators have to make the block available and signal its availability to - /// the chain. Must be at least 1. - pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads (on-demand parachains). Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. Must be at least 1. - pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule on-demand parachains. - pub scheduling_lookahead: u32, - /// The maximum number of validators to have per core. `None` means no maximum. - pub max_validators_per_core: Option, - /// The maximum number of validators to use for parachains, in total. `None` means no maximum. - pub max_validators: Option, - /// The amount of sessions to keep for disputes. - pub dispute_period: SessionIndex, - /// How long after dispute conclusion to accept statements. - pub dispute_post_conclusion_acceptance_period: BlockNumber, - /// The maximum number of dispute spam slots - pub dispute_max_spam_slots: u32, - /// The amount of consensus slots that must pass between submitting an assignment and - /// submitting an approval vote before a validator is considered a no-show. - /// Must be at least 1. - pub no_show_slots: u32, - /// The number of delay tranches in total. - pub n_delay_tranches: u32, - /// The width of the zeroth delay tranche for approval assignments. This many delay tranches - /// beyond 0 are all consolidated to form a wide 0 tranche. - pub zeroth_delay_tranche_width: u32, - /// The number of validators needed to approve a block. - pub needed_approvals: u32, - /// The number of samples to use in `RelayVRFModulo` or `RelayVRFModuloCompact` approval assignment criterions. - pub relay_vrf_modulo_samples: u32, - /// Total number of individual messages allowed in the parachain -> relay-chain message queue. - pub max_upward_queue_count: u32, - /// Total size of messages allowed in the parachain -> relay-chain message queue before which - /// no further messages may be added to it. If it exceeds this then the queue may contain only - /// a single message. - pub max_upward_queue_size: u32, - /// The maximum size of an upward message that can be sent by a candidate. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub max_upward_message_size: u32, - /// The maximum number of messages that a candidate can contain. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub max_upward_message_num_per_candidate: u32, - /// The maximum size of a message that can be put in a downward message queue. - /// - /// Since we require receiving at least one DMP message the obvious upper bound of the size is - /// the PoV size. 
Of course, there is a lot of other different things that a parachain may - /// decide to do with its PoV so this value in practice will be picked as a fraction of the PoV - /// size. - pub max_downward_message_size: u32, - /// The deposit that the sender should provide for opening an HRMP channel. - pub hrmp_sender_deposit: u32, - /// The deposit that the recipient should provide for accepting opening an HRMP channel. - pub hrmp_recipient_deposit: u32, - /// The maximum number of messages allowed in an HRMP channel at once. - pub hrmp_channel_max_capacity: u32, - /// The maximum total size of messages in bytes allowed in an HRMP channel at once. - pub hrmp_channel_max_total_size: u32, - /// The maximum number of inbound HRMP channels a parachain is allowed to accept. - pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed to accept. - pub hrmp_max_parathread_inbound_channels: u32, - /// The maximum size of a message that could ever be put into an HRMP channel. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub hrmp_channel_max_message_size: u32, - /// The maximum number of outbound HRMP channels a parachain is allowed to open. - pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed to open. - pub hrmp_max_parathread_outbound_channels: u32, - /// The maximum number of outbound HRMP messages can be sent by a candidate. - /// - /// This parameter affects the upper bound of size of `CandidateCommitments`. - pub hrmp_max_message_num_per_candidate: u32, -} -``` +The internal-to-runtime configuration of the parachain host is kept in `struct HostConfiguration`. This is expected to +be altered only by governance procedures or via migrations from the Polkadot-SDK codebase. The latest definition of +`HostConfiguration` can be found in the project repo +[here](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs). Each +parameter has got a doc comment so for any details please refer to the code. + +Some related parameters in `HostConfiguration` are grouped together so that they can be managed easily. These are: +* `async_backing_params` in `struct AsyncBackingParams` +* `executor_params` in `struct ExecutorParams` +* `approval_voting_params` in `struct ApprovalVotingParams` +* `scheduler_params` in `struct SchedulerParams` + +Check the definitions of these structs for further details. + +### Configuration migrations +Modifying `HostConfiguration` requires a storage migration. These migrations are located in the +[`migrations`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/configuration.rs) +subfolder of Polkadot-SDK repo. 
## ParaInherentData diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs index 71c3f1fa39f..e2ba0b4f7ea 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mock_helpers.rs @@ -64,11 +64,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index 9da81dc816c..e2da89fadd4 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -30,7 +30,7 @@ mod tests; use crate::{ assigner_on_demand, configuration, paras::AssignCoretime, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider}, ParaId, }; @@ -316,14 +316,6 @@ impl AssignmentProvider> for Pallet { } } - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - let config = >::config(); - AssignmentProviderConfig { - max_availability_timeouts: config.on_demand_retries, - ttl: config.on_demand_ttl, - } - } - #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { // Given that we are not tracking anything in `Bulk` assignments, it is safe to always @@ -333,7 +325,7 @@ impl AssignmentProvider> for Pallet { fn session_core_count() -> u32 { let config = >::config(); - config.coretime_cores + config.scheduler_params.num_cores } } @@ -482,8 +474,8 @@ impl AssignCoretime for Pallet { // Add a new core and assign the para to it. let mut config = >::config(); - let core = config.coretime_cores; - config.coretime_cores.saturating_inc(); + let core = config.scheduler_params.num_cores; + config.scheduler_params.num_cores.saturating_inc(); // `assign_coretime` is only called at genesis or by root, so setting the active // config here is fine. 
diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 5a6060cd2b4..8360e7a78d0 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -43,7 +43,7 @@ where { ParasShared::::set_session_index(SESSION_INDEX); let mut config = HostConfiguration::default(); - config.coretime_cores = 1; + config.scheduler_params.num_cores = 1; ConfigurationPallet::::force_set_active_config(config); let mut parachains = ParachainsCache::new(); ParasPallet::::initialize_para_now( diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs index de30330ac84..f8d1a894f0e 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -63,11 +63,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 1b746e88694..bc450dc7812 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -201,10 +201,10 @@ pub mod pallet { let old_traffic = SpotTraffic::::get(); match Self::calculate_spot_traffic( old_traffic, - config.on_demand_queue_max_size, + config.scheduler_params.on_demand_queue_max_size, Self::queue_size(), - config.on_demand_target_queue_utilization, - config.on_demand_fee_variability, + config.scheduler_params.on_demand_target_queue_utilization, + config.scheduler_params.on_demand_fee_variability, ) { Ok(new_traffic) => { // Only update storage on change @@ -330,8 +330,9 @@ where let traffic = SpotTraffic::::get(); // Calculate spot price - let spot_price: BalanceOf = - traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + let spot_price: BalanceOf = traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); // Is the current price higher than `max_amount` ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); @@ -450,7 +451,10 @@ where OnDemandQueue::::try_mutate(|queue| { // Abort transaction if queue is too large - ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::::QueueFull); + ensure!( + Self::queue_size() < config.scheduler_params.on_demand_queue_max_size, + Error::::QueueFull + ); match 
location { QueuePushDirection::Back => queue.push_back(order), QueuePushDirection::Front => queue.push_front(order), @@ -472,7 +476,7 @@ where target: LOG_TARGET, "Failed to fetch the on demand queue size, returning the max size." ); - return config.on_demand_queue_max_size + return config.scheduler_params.on_demand_queue_max_size }, } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 34b5d3c1ec5..b5f342563e9 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -27,7 +27,7 @@ use primitives::CoreIndex; use crate::{ configuration, paras, - scheduler::common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::{Assignment, AssignmentProvider}, }; pub use pallet::*; @@ -58,16 +58,6 @@ impl AssignmentProvider> for Pallet { /// this is a no-op in the case of a bulk assignment slot. fn push_back_assignment(_: Assignment) {} - fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { - AssignmentProviderConfig { - // The next assignment already goes to the same [`ParaId`], no timeout tracking needed. - max_availability_timeouts: 0, - // The next assignment already goes to the same [`ParaId`], this can be any number - // that's high enough to clear the time it takes to clear backing/availability. - ttl: 10u32.into(), - } - } - #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: primitives::Id) -> Assignment { Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs index e6e9fb074aa..a46e114daea 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs @@ -60,11 +60,12 @@ impl GenesisConfigBuilder { pub(super) fn build(self) -> MockGenesisConfig { let mut genesis = default_genesis_config(); let config = &mut genesis.configuration.config; - config.coretime_cores = self.on_demand_cores; - config.on_demand_base_fee = self.on_demand_base_fee; - config.on_demand_fee_variability = self.on_demand_fee_variability; - config.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + config.scheduler_params.num_cores = self.on_demand_cores; + config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; + config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; + config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.scheduler_params.on_demand_target_queue_utilization = + self.on_demand_target_queue_utilization; let paras = &mut genesis.paras.paras; for para_id in self.onboarded_on_demand_chains { diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 500bc70cfa7..0e4e659fef2 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -18,11 +18,7 @@ use crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, - scheduler::{ - self, - common::{AssignmentProvider, AssignmentProviderConfig}, - CoreOccupied, ParasEntry, - }, + scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry}, session_info, shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, 
vec::BitVec}; @@ -197,7 +193,10 @@ impl BenchBuilder { /// Maximum number of validators per core (a.k.a. max validators per group). This value is used /// if none is explicitly set on the builder. pub(crate) fn fallback_max_validators_per_core() -> u32 { - configuration::Pallet::::config().max_validators_per_core.unwrap_or(5) + configuration::Pallet::::config() + .scheduler_params + .max_validators_per_core + .unwrap_or(5) } /// Specify a mapping of core index/ para id to the number of dispute statements for the @@ -684,7 +683,7 @@ impl BenchBuilder { // We are currently in Session 0, so these changes will take effect in Session 2. Self::setup_para_ids(used_cores); configuration::ActiveConfig::::mutate(|c| { - c.coretime_cores = used_cores; + c.scheduler_params.num_cores = used_cores; }); let validator_ids = Self::generate_validator_pairs(self.max_validators()); @@ -715,8 +714,7 @@ impl BenchBuilder { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. } = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( CoreIndex(i), @@ -731,8 +729,7 @@ impl BenchBuilder { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. } = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let ttl = configuration::Pallet::::config().scheduler_params.ttl; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index b0e9d03df88..364a15215d3 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,7 +29,6 @@ use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -43,6 +42,7 @@ mod benchmarking; pub mod migration; pub use pallet::*; +use primitives::vstaging::SchedulerParams; const LOG_TARGET: &str = "runtime::configuration"; @@ -118,9 +118,9 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`paras_availability_period`](Self::paras_availability_period) is less than the delay set - /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade - /// is further at the earliest possible time determined by + /// [`paras_availability_period`](SchedulerParams::paras_availability_period) is less than the + /// delay set by this field or if PVF pre-check took more time than the delay. In such cases, + /// the upgrade is further at the earliest possible time determined by /// [`minimum_validation_upgrade_delay`](Self::minimum_validation_upgrade_delay). /// /// The rationale for this delay has to do with relay-chain reversions. In case there is an @@ -172,48 +172,7 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. 
pub code_retention_period: BlockNumber, - /// How many cores are managed by the coretime chain. - pub coretime_cores: u32, - /// The number of retries that a on demand author has to submit their block. - pub on_demand_retries: u32, - /// The maximum queue size of the pay as you go module. - pub on_demand_queue_max_size: u32, - /// The target utilization of the spot price queue in percentages. - pub on_demand_target_queue_utilization: Perbill, - /// How quickly the fee rises in reaction to increased utilization. - /// The lower the number the slower the increase. - pub on_demand_fee_variability: Perbill, - /// The minimum amount needed to claim a slot in the spot pricing queue. - pub on_demand_base_fee: Balance, - /// The number of blocks an on demand claim stays in the scheduler's claimqueue before getting - /// cleared. This number should go reasonably higher than the number of blocks in the async - /// backing lookahead. - pub on_demand_ttl: BlockNumber, - /// How often parachain groups should be rotated across parachains. - /// - /// Must be non-zero. - pub group_rotation_frequency: BlockNumber, - /// The minimum availability period, in blocks. - /// - /// This is the minimum amount of blocks after a core became occupied that validators have time - /// to make the block available. - /// - /// This value only has effect on group rotations. If backers backed something at the end of - /// their rotation, the occupied core affects the backing group that comes afterwards. We limit - /// the effect one backing group can have on the next to `paras_availability_period` blocks. - /// - /// Within a group rotation there is no timeout as backers are only affecting themselves. - /// - /// Must be at least 1. With a value of 1, the previous group will not be able to negatively - /// affect the following group at the expense of a tight availability timeline at group - /// rotation boundaries. - pub paras_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule paras. - pub scheduling_lookahead: u32, - /// The maximum number of validators to have per core. - /// - /// `None` means no maximum. - pub max_validators_per_core: Option, + /// The maximum number of validators to use for parachain consensus, period. /// /// `None` means no maximum. @@ -257,7 +216,7 @@ pub struct HostConfiguration { /// scheduled. This number is controlled by this field. /// /// This value should be greater than - /// [`paras_availability_period`](Self::paras_availability_period). + /// [`paras_availability_period`](SchedulerParams::paras_availability_period). pub minimum_validation_upgrade_delay: BlockNumber, /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. @@ -266,6 +225,8 @@ pub struct HostConfiguration { pub node_features: NodeFeatures, /// Params used by approval-voting pub approval_voting_params: ApprovalVotingParams, + /// Scheduler parameters + pub scheduler_params: SchedulerParams, } impl> Default for HostConfiguration { @@ -275,8 +236,6 @@ impl> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration { ZeroMinimumBackingVotes, /// `executor_params` are inconsistent. 
InconsistentExecutorParams { inner: ExecutorParamError }, + /// TTL should be bigger than lookahead + LookaheadExceedsTTL, } impl HostConfiguration @@ -373,11 +326,11 @@ where pub fn check_consistency(&self) -> Result<(), InconsistentError> { use InconsistentError::*; - if self.group_rotation_frequency.is_zero() { + if self.scheduler_params.group_rotation_frequency.is_zero() { return Err(ZeroGroupRotationFrequency) } - if self.paras_availability_period.is_zero() { + if self.scheduler_params.paras_availability_period.is_zero() { return Err(ZeroParasAvailabilityPeriod) } @@ -399,10 +352,11 @@ where return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size }) } - if self.minimum_validation_upgrade_delay <= self.paras_availability_period { + if self.minimum_validation_upgrade_delay <= self.scheduler_params.paras_availability_period + { return Err(MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - paras_availability_period: self.paras_availability_period.clone(), + paras_availability_period: self.scheduler_params.paras_availability_period.clone(), }) } @@ -447,6 +401,10 @@ where return Err(InconsistentExecutorParams { inner }) } + if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() { + return Err(LookaheadExceedsTTL) + } + Ok(()) } @@ -471,6 +429,7 @@ pub trait WeightInfo { fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; fn set_node_feature() -> Weight; + fn set_config_with_scheduler_params() -> Weight; } pub struct TestWeightInfo; @@ -499,6 +458,9 @@ impl WeightInfo for TestWeightInfo { fn set_node_feature() -> Weight { Weight::MAX } + fn set_config_with_scheduler_params() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -520,7 +482,8 @@ pub mod pallet { /// v8-v9: /// v9-v10: /// v10-11: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(11); + /// v11-12: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(12); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -679,16 +642,16 @@ pub mod pallet { Self::set_coretime_cores_unchecked(new) } - /// Set the number of retries for a particular on demand. 
+ /// Set the max number of times a claim may timeout on a core before it is abandoned #[pallet::call_index(7)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_on_demand_retries(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_max_availability_timeouts(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_retries = new; + config.scheduler_params.max_availability_timeouts = new; }) } @@ -704,7 +667,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.group_rotation_frequency = new; + config.scheduler_params.group_rotation_frequency = new; }) } @@ -720,7 +683,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.paras_availability_period = new; + config.scheduler_params.paras_availability_period = new; }) } @@ -733,7 +696,7 @@ pub mod pallet { pub fn set_scheduling_lookahead(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.scheduling_lookahead = new; + config.scheduler_params.lookahead = new; }) } @@ -749,7 +712,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.max_validators_per_core = new; + config.scheduler_params.max_validators_per_core = new; }) } @@ -1141,7 +1104,7 @@ pub mod pallet { pub fn set_on_demand_base_fee(origin: OriginFor, new: Balance) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_base_fee = new; + config.scheduler_params.on_demand_base_fee = new; }) } @@ -1154,7 +1117,7 @@ pub mod pallet { pub fn set_on_demand_fee_variability(origin: OriginFor, new: Perbill) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_fee_variability = new; + config.scheduler_params.on_demand_fee_variability = new; }) } @@ -1167,7 +1130,7 @@ pub mod pallet { pub fn set_on_demand_queue_max_size(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_queue_max_size = new; + config.scheduler_params.on_demand_queue_max_size = new; }) } /// Set the on demand (parathreads) fee variability. @@ -1182,7 +1145,7 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_target_queue_utilization = new; + config.scheduler_params.on_demand_target_queue_utilization = new; }) } /// Set the on demand (parathreads) ttl in the claimqueue. @@ -1194,7 +1157,7 @@ pub mod pallet { pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.on_demand_ttl = new; + config.scheduler_params.ttl = new; }) } @@ -1244,6 +1207,22 @@ pub mod pallet { config.approval_voting_params = new; }) } + + /// Set scheduler-params. + #[pallet::call_index(55)] + #[pallet::weight(( + T::WeightInfo::set_config_with_scheduler_params(), + DispatchClass::Operational, + ))] + pub fn set_scheduler_params( + origin: OriginFor, + new: SchedulerParams>, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.scheduler_params = new; + }) + } } impl Pallet { @@ -1252,7 +1231,7 @@ pub mod pallet { /// To be used if authorization is checked otherwise. 
pub fn set_coretime_cores_unchecked(new: u32) -> DispatchResult { Self::schedule_config_update(|config| { - config.coretime_cores = new; + config.scheduler_params.num_cores = new; }) } } @@ -1393,7 +1372,7 @@ impl Pallet { let base_config_consistent = base_config.check_consistency().is_ok(); // Now, we need to decide what the new configuration should be. - // We also move the `base_config` to `new_config` to empahsize that the base config was + // We also move the `base_config` to `new_config` to emphasize that the base config was // destroyed by the `updater`. updater(&mut base_config); let new_config = base_config; diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index 67daf1c4598..882b5aab096 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -51,6 +51,8 @@ benchmarks! { set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + set_config_with_scheduler_params {} : set_scheduler_params(RawOrigin::Root, SchedulerParams::default()) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 2838b73092d..87b30b177e7 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -18,6 +18,7 @@ pub mod v10; pub mod v11; +pub mod v12; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs index a69061ac138..f6e0e043164 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v11.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -21,13 +21,122 @@ use frame_support::{ migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::ApprovalVotingParams, SessionIndex}; +use primitives::{ + vstaging::ApprovalVotingParams, AsyncBackingParams, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; +use polkadot_core_primitives::Balance; +use primitives::vstaging::NodeFeatures; +use sp_arithmetic::Perbill; use super::v10::V10HostConfiguration; -type V11HostConfiguration = configuration::HostConfiguration; + +#[derive(Clone, Encode, PartialEq, Decode, Debug)] +pub struct V11HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: 
BlockNumber, + pub coretime_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, + pub node_features: NodeFeatures, + pub approval_voting_params: ApprovalVotingParams, +} + +impl> Default for V11HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + coretime_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + node_features: NodeFeatures::EMPTY, + } + } +} mod v10 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs new file mode 100644 index 00000000000..4295a79893e --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -0,0 +1,349 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use crate::configuration::{self, migration::v11::V11HostConfiguration, Config, Pallet}; +use frame_support::{ + migrations::VersionedMigration, + pallet_prelude::*, + traits::{Defensive, OnRuntimeUpgrade}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::vstaging::SchedulerParams; +use sp_core::Get; +use sp_staking::SessionIndex; +use sp_std::vec::Vec; + +type V12HostConfiguration = configuration::HostConfiguration; + +mod v11 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V11HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V11HostConfiguration>)>, + OptionQuery, + >; +} + +mod v12 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V12HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V12HostConfiguration>)>, + OptionQuery, + >; +} + +pub type MigrateToV12 = VersionedMigration< + 11, + 12, + UncheckedMigrateToV12, + Pallet, + ::DbWeight, +>; + +pub struct UncheckedMigrateToV12(sp_std::marker::PhantomData); + +impl OnRuntimeUpgrade for UncheckedMigrateToV12 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV12"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 started"); + let weight_consumed = migrate_to_v12::(); + + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV12 executed successfully"); + + weight_consumed + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV12"); + ensure!( + StorageVersion::get::>() >= 12, + "Storage version should be >= 12 after the migration" + ); + + Ok(()) + } +} + +fn migrate_to_v12() -> Weight { + // Unusual formatting is justified: + // - make it easier to verify that fields assign what they supposed to assign. + // - this code is transient and will be removed after all migrations are done. + // - this code is important enough to optimize for legibility sacrificing consistency. 
+ #[rustfmt::skip] + let translate = + |pre: V11HostConfiguration>| -> + V12HostConfiguration> + { + V12HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : pre.node_features, + approval_voting_params : pre.approval_voting_params, + scheduler_params: SchedulerParams { + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + max_validators_per_core : pre.max_validators_per_core, + lookahead : pre.scheduling_lookahead, + num_cores : pre.coretime_cores, + max_availability_timeouts : pre.on_demand_retries, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_base_fee : pre.on_demand_base_fee, + ttl : pre.on_demand_ttl, + } + } + }; + + let v11 = v11::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v12 = translate(v11); + v12::ActiveConfig::::set(Some(v12)); + + // Allowed to be empty. 
+ let pending_v11 = v11::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v12 = Vec::new(); + + for (session, v11) in pending_v11.into_iter() { + let v12 = translate(v11); + pending_v12.push((session, v12)); + } + v12::PendingConfigs::::set(Some(pending_v12.clone())); + + let num_configs = (pending_v12.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use primitives::LEGACY_MIN_BACKING_VOTES; + use sp_arithmetic::Perbill; + + use super::*; + use crate::mock::{new_test_ext, Test}; + + #[test] + fn v12_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![ + "0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000060000006400000002000000190000000000000002000000020000000200000005000000020000000001000000140000000400000001010000000100000001000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000" + ]; + + let v12 = + V12HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v12.max_code_size, 3_145_728); + assert_eq!(v12.validation_upgrade_cooldown, 2); + assert_eq!(v12.max_pov_size, 5_242_880); + assert_eq!(v12.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v12.n_delay_tranches, 25); + assert_eq!(v12.minimum_validation_upgrade_delay, 5); + assert_eq!(v12.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1); + assert_eq!(v12.scheduler_params.group_rotation_frequency, 20); + assert_eq!(v12.scheduler_params.paras_availability_period, 4); + assert_eq!(v12.scheduler_params.lookahead, 1); + assert_eq!(v12.scheduler_params.num_cores, 1); + assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000); + assert_eq!( + v12.scheduler_params.on_demand_target_queue_utilization, + Perbill::from_percent(25) + ); + assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3)); + assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000); + assert_eq!(v12.scheduler_params.ttl, 5); + } + + #[test] + fn test_migrate_to_v12() { + // Host configuration has lots of fields. 
However, in this migration we only add one
+ // field. The most important parts to check are the last couple of fields. We also pick
+ // a few extra fields to check, chosen arbitrarily by their position (e.g. the middle one) and
+ // by their type.
+ //
+ // We specify only the picked fields; the rest should be provided by the `Default`
+ // implementation. That implementation is copied over between the two types and should work
+ // fine.
+ let v11 = V11HostConfiguration:: {
+ needed_approvals: 69,
+ paras_availability_period: 55,
+ hrmp_recipient_deposit: 1337,
+ max_pov_size: 1111,
+ minimum_validation_upgrade_delay: 20,
+ on_demand_ttl: 3,
+ on_demand_retries: 10,
+ ..Default::default()
+ };
+
+ let mut pending_configs = Vec::new();
+ pending_configs.push((100, v11.clone()));
+ pending_configs.push((300, v11.clone()));
+
+ new_test_ext(Default::default()).execute_with(|| {
+ // Implant the v11 version in the state.
+ v11::ActiveConfig::::set(Some(v11.clone()));
+ v11::PendingConfigs::::set(Some(pending_configs));
+
+ migrate_to_v12::();
+
+ let v12 = v12::ActiveConfig::::get().unwrap();
+ assert_eq!(v12.approval_voting_params.max_approval_coalesce_count, 1);
+
+ let mut configs_to_check = v12::PendingConfigs::::get().unwrap();
+ configs_to_check.push((0, v12.clone()));
+
+ for (_, v12) in configs_to_check {
+ #[rustfmt::skip]
+ {
+ assert_eq!(v11.max_code_size , v12.max_code_size);
+ assert_eq!(v11.max_head_data_size , v12.max_head_data_size);
+ assert_eq!(v11.max_upward_queue_count , v12.max_upward_queue_count);
+ assert_eq!(v11.max_upward_queue_size , v12.max_upward_queue_size);
+ assert_eq!(v11.max_upward_message_size , v12.max_upward_message_size);
+ assert_eq!(v11.max_upward_message_num_per_candidate , v12.max_upward_message_num_per_candidate);
+ assert_eq!(v11.hrmp_max_message_num_per_candidate , v12.hrmp_max_message_num_per_candidate);
+ assert_eq!(v11.validation_upgrade_cooldown , v12.validation_upgrade_cooldown);
+ assert_eq!(v11.validation_upgrade_delay , v12.validation_upgrade_delay);
+ assert_eq!(v11.max_pov_size , v12.max_pov_size);
+ assert_eq!(v11.max_downward_message_size , v12.max_downward_message_size);
+ assert_eq!(v11.hrmp_max_parachain_outbound_channels , v12.hrmp_max_parachain_outbound_channels);
+ assert_eq!(v11.hrmp_sender_deposit , v12.hrmp_sender_deposit);
+ assert_eq!(v11.hrmp_recipient_deposit , v12.hrmp_recipient_deposit);
+ assert_eq!(v11.hrmp_channel_max_capacity , v12.hrmp_channel_max_capacity);
+ assert_eq!(v11.hrmp_channel_max_total_size , v12.hrmp_channel_max_total_size);
+ assert_eq!(v11.hrmp_max_parachain_inbound_channels , v12.hrmp_max_parachain_inbound_channels);
+ assert_eq!(v11.hrmp_channel_max_message_size , v12.hrmp_channel_max_message_size);
+ assert_eq!(v11.code_retention_period , v12.code_retention_period);
+ assert_eq!(v11.max_validators , v12.max_validators);
+ assert_eq!(v11.dispute_period , v12.dispute_period);
+ assert_eq!(v11.no_show_slots , v12.no_show_slots);
+ assert_eq!(v11.n_delay_tranches , v12.n_delay_tranches);
+ assert_eq!(v11.zeroth_delay_tranche_width , v12.zeroth_delay_tranche_width);
+ assert_eq!(v11.needed_approvals , v12.needed_approvals);
+ assert_eq!(v11.relay_vrf_modulo_samples , v12.relay_vrf_modulo_samples);
+ assert_eq!(v11.pvf_voting_ttl , v12.pvf_voting_ttl);
+ assert_eq!(v11.minimum_validation_upgrade_delay , v12.minimum_validation_upgrade_delay);
+ assert_eq!(v11.async_backing_params.allowed_ancestry_len, v12.async_backing_params.allowed_ancestry_len);
+ assert_eq!(v11.async_backing_params.max_candidate_depth , v12.async_backing_params.max_candidate_depth);
+ assert_eq!(v11.executor_params , v12.executor_params);
+ assert_eq!(v11.minimum_backing_votes , v12.minimum_backing_votes);
+ assert_eq!(v11.group_rotation_frequency , v12.scheduler_params.group_rotation_frequency);
+ assert_eq!(v11.paras_availability_period , v12.scheduler_params.paras_availability_period);
+ assert_eq!(v11.max_validators_per_core , v12.scheduler_params.max_validators_per_core);
+ assert_eq!(v11.scheduling_lookahead , v12.scheduler_params.lookahead);
+ assert_eq!(v11.coretime_cores , v12.scheduler_params.num_cores);
+ assert_eq!(v11.on_demand_retries , v12.scheduler_params.max_availability_timeouts);
+ assert_eq!(v11.on_demand_queue_max_size , v12.scheduler_params.on_demand_queue_max_size);
+ assert_eq!(v11.on_demand_target_queue_utilization , v12.scheduler_params.on_demand_target_queue_utilization);
+ assert_eq!(v11.on_demand_fee_variability , v12.scheduler_params.on_demand_fee_variability);
+ assert_eq!(v11.on_demand_base_fee , v12.scheduler_params.on_demand_base_fee);
+ assert_eq!(v11.on_demand_ttl , v12.scheduler_params.ttl);
+ }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression.
+ }
+ });
+ }
+
+ // Test that the migration doesn't panic in case there are no pending configuration upgrades
+ // in the pallet's storage.
+ #[test]
+ fn test_migrate_to_v12_no_pending() {
+ let v11 = V11HostConfiguration::::default();
+
+ new_test_ext(Default::default()).execute_with(|| {
+ // Implant the v11 version in the state.
+ v11::ActiveConfig::::set(Some(v11));
+ // Ensure there are no pending configs.
+ v12::PendingConfigs::::set(None);
+
+ // Shouldn't fail.
+ migrate_to_v12::();
+ });
+ }
+}
diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs
index f1570017dd7..254511231ca 100644
--- a/polkadot/runtime/parachains/src/configuration/tests.rs
+++ b/polkadot/runtime/parachains/src/configuration/tests.rs
@@ -226,8 +226,11 @@ fn invariants() {
 );
 ActiveConfig::::put(HostConfiguration {
- paras_availability_period: 10,
 minimum_validation_upgrade_delay: 11,
+ scheduler_params: SchedulerParams {
+ paras_availability_period: 10,
+ ..Default::default()
+ },
 ..Default::default()
 });
 assert_err!(
@@ -283,12 +286,6 @@ fn setting_pending_config_members() {
 max_code_size: 100_000,
 max_pov_size: 1024,
 max_head_data_size: 1_000,
- coretime_cores: 2,
- on_demand_retries: 5,
- group_rotation_frequency: 20,
- paras_availability_period: 10,
- scheduling_lookahead: 3,
- max_validators_per_core: None,
 max_validators: None,
 dispute_period: 239,
 dispute_post_conclusion_acceptance_period: 10,
@@ -314,13 +311,21 @@ fn setting_pending_config_members() {
 minimum_validation_upgrade_delay: 20,
 executor_params: Default::default(),
 approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 },
- on_demand_queue_max_size: 10_000u32,
- on_demand_base_fee: 10_000_000u128,
- on_demand_fee_variability: Perbill::from_percent(3),
- on_demand_target_queue_utilization: Perbill::from_percent(25),
- on_demand_ttl: 5u32,
 minimum_backing_votes: 5,
 node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
+ scheduler_params: SchedulerParams {
+ group_rotation_frequency: 20,
+ paras_availability_period: 10,
+ max_validators_per_core: None,
+ lookahead: 3,
+ num_cores: 2,
+ max_availability_timeouts: 5,
+ on_demand_queue_max_size: 10_000u32,
+ on_demand_base_fee: 10_000_000u128,
+
on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + ttl: 5u32, + }, }; Configuration::set_validation_upgrade_cooldown( @@ -342,13 +347,19 @@ fn setting_pending_config_members() { Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap(); Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size) .unwrap(); - Configuration::set_coretime_cores(RuntimeOrigin::root(), new_config.coretime_cores) - .unwrap(); - Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries) - .unwrap(); + Configuration::set_coretime_cores( + RuntimeOrigin::root(), + new_config.scheduler_params.num_cores, + ) + .unwrap(); + Configuration::set_max_availability_timeouts( + RuntimeOrigin::root(), + new_config.scheduler_params.max_availability_timeouts, + ) + .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), - new_config.group_rotation_frequency, + new_config.scheduler_params.group_rotation_frequency, ) .unwrap(); // This comes out of order to satisfy the validity criteria for the chain and thread @@ -360,17 +371,17 @@ fn setting_pending_config_members() { .unwrap(); Configuration::set_paras_availability_period( RuntimeOrigin::root(), - new_config.paras_availability_period, + new_config.scheduler_params.paras_availability_period, ) .unwrap(); Configuration::set_scheduling_lookahead( RuntimeOrigin::root(), - new_config.scheduling_lookahead, + new_config.scheduler_params.lookahead, ) .unwrap(); Configuration::set_max_validators_per_core( RuntimeOrigin::root(), - new_config.max_validators_per_core, + new_config.scheduler_params.max_validators_per_core, ) .unwrap(); Configuration::set_max_validators(RuntimeOrigin::root(), new_config.max_validators) diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 9bc0a20ef5b..03fecc58570 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -114,7 +114,7 @@ mod v_coretime { let legacy_paras = paras::Parachains::::get(); let config = >::config(); - let total_core_count = config.coretime_cores + legacy_paras.len() as u32; + let total_core_count = config.scheduler_params.num_cores + legacy_paras.len() as u32; let dmp_queue_size = crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; @@ -150,7 +150,7 @@ mod v_coretime { // Migrate to Coretime. // - // NOTE: Also migrates coretime_cores config value in configuration::ActiveConfig. + // NOTE: Also migrates `num_cores` config value in configuration::ActiveConfig. 
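+ // At this point `scheduler_params.num_cores` still only counts on-demand cores; the code
+ // below adds the legacy (lease) parachain count on top and writes the total back into the
+ // active configuration.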
fn migrate_to_coretime< T: Config, SendXcm: xcm::v4::SendXcm, @@ -176,8 +176,8 @@ mod v_coretime { } let config = >::config(); - // coretime_cores was on_demand_cores until now: - for on_demand in 0..config.coretime_cores { + // num_cores was on_demand_cores until now: + for on_demand in 0..config.scheduler_params.num_cores { let core = CoreIndex(legacy_count.saturating_add(on_demand as _)); let r = assigner_coretime::Pallet::::assign_core( core, @@ -189,9 +189,9 @@ mod v_coretime { log::error!("Creating assignment for existing on-demand core, failed: {:?}", err); } } - let total_cores = config.coretime_cores + legacy_count; + let total_cores = config.scheduler_params.num_cores + legacy_count; configuration::ActiveConfig::::mutate(|c| { - c.coretime_cores = total_cores; + c.scheduler_params.num_cores = total_cores; }); if let Err(err) = migrate_send_assignments_to_coretime_chain::() { @@ -200,7 +200,9 @@ mod v_coretime { let single_weight = ::WeightInfo::assign_core(1); single_weight - .saturating_mul(u64::from(legacy_count.saturating_add(config.coretime_cores))) + .saturating_mul(u64::from( + legacy_count.saturating_add(config.scheduler_params.num_cores), + )) // Second read from sending assignments to the coretime chain. .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } @@ -244,7 +246,8 @@ mod v_coretime { Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) }); - let core_count: u16 = configuration::Pallet::::config().coretime_cores.saturated_into(); + let core_count: u16 = + configuration::Pallet::::config().scheduler_params.num_cores.saturated_into(); let set_core_count = iter::once(mk_coretime_call( crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), )); diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 531f5c2e4e4..eb9646d7e86 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -214,8 +214,8 @@ impl Pallet { } pub fn initializer_on_new_session(notification: &SessionChangeNotification>) { - let old_core_count = notification.prev_config.coretime_cores; - let new_core_count = notification.new_config.coretime_cores; + let old_core_count = notification.prev_config.scheduler_params.num_cores; + let new_core_count = notification.new_config.scheduler_params.num_cores; if new_core_count != old_core_count { let core_count: u16 = new_core_count.saturated_into(); let message = Xcm(vec![ diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index d2b5a67c3e4..3fe7d7f0c7d 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -47,10 +47,10 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.coretime_cores = 1; + config.scheduler_params.num_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; - config.group_rotation_frequency = u32::MAX; + config.scheduler_params.group_rotation_frequency = u32::MAX; config } @@ -224,7 +224,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().coretime_cores as usize + Paras::parachains().len() + Configuration::config().scheduler_params.num_cores as usize } fn default_bitfield() -> AvailabilityBitfield { @@ -386,7 +386,7 @@ 
fn collect_pending_cleans_up_pending() { (thread_a, ParaKind::Parathread), ]; let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 3; + config.configuration.config.scheduler_params.group_rotation_frequency = 3; new_test_ext(config).execute_with(|| { let default_candidate = TestCandidateBuilder::default().build(); >::insert( @@ -2062,7 +2062,7 @@ fn check_allowed_relay_parents() { } let validator_public = validator_pubkeys(&validators); let mut config = genesis_config(paras); - config.configuration.config.group_rotation_frequency = 1; + config.configuration.config.scheduler_params.group_rotation_frequency = 1; new_test_ext(config).execute_with(|| { shared::Pallet::::set_active_validators_ascending(validator_public.clone()); diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 1925ca19501..e18be03bdeb 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -23,7 +23,7 @@ use crate::{ initializer, origin, paras, paras::ParaKind, paras_inherent, scheduler, - scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, + scheduler::common::AssignmentProvider, session_info, shared, ParaId, }; use frame_support::pallet_prelude::*; @@ -463,10 +463,6 @@ pub mod mock_assigner { pub(super) type MockAssignmentQueue = StorageValue<_, VecDeque, ValueQuery>; - #[pallet::storage] - pub(super) type MockProviderConfig = - StorageValue<_, AssignmentProviderConfig, OptionQuery>; - #[pallet::storage] pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; } @@ -478,12 +474,6 @@ pub mod mock_assigner { MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); } - // This configuration needs to be customized to service `get_provider_config` in - // scheduler tests. - pub fn set_assignment_provider_config(config: AssignmentProviderConfig) { - MockProviderConfig::::set(Some(config)); - } - // Allows for customized core count in scheduler tests, rather than a core count // derived from on-demand config + parachain count. pub fn set_core_count(count: u32) { @@ -512,17 +502,6 @@ pub mod mock_assigner { // in the mock assigner. fn push_back_assignment(_assignment: Assignment) {} - // Gets the provider config we set earlier using `set_assignment_provider_config`, falling - // back to the on demand parachain configuration if none was set. 
- fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig { - match MockProviderConfig::::get() { - Some(config) => config, - None => AssignmentProviderConfig { - max_availability_timeouts: 1, - ttl: BlockNumber::from(5u32), - }, - } - } #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { Assignment::Bulk(para_id) diff --git a/polkadot/runtime/parachains/src/paras/tests.rs b/polkadot/runtime/parachains/src/paras/tests.rs index 24ea919ec87..262ec9d3fdb 100644 --- a/polkadot/runtime/parachains/src/paras/tests.rs +++ b/polkadot/runtime/parachains/src/paras/tests.rs @@ -17,7 +17,7 @@ use super::*; use frame_support::{assert_err, assert_ok, assert_storage_noop}; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, PARACHAIN_KEY_TYPE_ID}; +use primitives::{vstaging::SchedulerParams, BlockNumber, PARACHAIN_KEY_TYPE_ID}; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -909,7 +909,10 @@ fn full_parachain_cleanup_storage() { minimum_validation_upgrade_delay: 2, // Those are not relevant to this test. However, HostConfiguration is still a // subject for the consistency check. - paras_availability_period: 1, + scheduler_params: SchedulerParams { + paras_availability_period: 1, + ..Default::default() + }, ..Default::default() }, }, diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index defb2f4404f..b7285ec884a 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -27,13 +27,14 @@ mod enter { builder::{Bench, BenchBuilder}, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, scheduler::{ - common::{Assignment, AssignmentProvider, AssignmentProviderConfig}, + common::{Assignment, AssignmentProvider}, ParasEntry, }, }; use assert_matches::assert_matches; use frame_support::assert_ok; use frame_system::limits; + use primitives::vstaging::SchedulerParams; use sp_runtime::Perbill; use sp_std::collections::btree_map::BTreeMap; @@ -87,7 +88,7 @@ mod enter { // `create_inherent` and will not cause `enter` to early. fn include_backed_candidates() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { let dispute_statements = BTreeMap::new(); @@ -625,7 +626,7 @@ mod enter { #[test] fn limit_candidates_over_weight_1() { let config = MockGenesisConfig::default(); - assert!(config.configuration.config.scheduling_lookahead > 0); + assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { // Create the inherent data for this block @@ -706,8 +707,8 @@ mod enter { let cores = (0..used_cores) .into_iter() .map(|i| { - let AssignmentProviderConfig { ttl, .. } = - scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + let SchedulerParams { ttl, .. 
} = + >::config().scheduler_params; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 3dbb050fb4e..61ab36dbe96 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -51,7 +51,7 @@ use sp_std::{ pub mod common; -use common::{Assignment, AssignmentProvider, AssignmentProviderConfig}; +use common::{Assignment, AssignmentProvider}; pub use pallet::*; @@ -222,7 +222,7 @@ impl Pallet { let n_cores = core::cmp::max( T::AssignmentProvider::session_core_count(), - match config.max_validators_per_core { + match config.scheduler_params.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, }, @@ -350,6 +350,7 @@ impl Pallet { fn drop_expired_claims_from_claimqueue() { let now = >::block_number(); let availability_cores = AvailabilityCores::::get(); + let ttl = >::config().scheduler_params.ttl; ClaimQueue::::mutate(|cq| { for (idx, _) in (0u32..).zip(availability_cores) { @@ -382,8 +383,6 @@ impl Pallet { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - let AssignmentProviderConfig { ttl, .. } = - T::AssignmentProvider::get_provider_config(core_idx); core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl)); } } @@ -428,7 +427,7 @@ impl Pallet { } let rotations_since_session_start: BlockNumberFor = - (at - session_start_block) / config.group_rotation_frequency; + (at - session_start_block) / config.scheduler_params.group_rotation_frequency; let rotations_since_session_start = as TryInto>::try_into(rotations_since_session_start) @@ -460,9 +459,9 @@ impl Pallet { // Note: blocks backed in this rotation will never time out here as backed_in + // config.paras_availability_period will always be > now for these blocks, as // otherwise above condition would not be true. - pending_since + config.paras_availability_period + pending_since + config.scheduler_params.paras_availability_period } else { - next_rotation + config.paras_availability_period + next_rotation + config.scheduler_params.paras_availability_period }; AvailabilityTimeoutStatus { timed_out: time_out_at <= now, live_until: time_out_at } @@ -478,7 +477,8 @@ impl Pallet { let now = >::block_number() + One::one(); let rotation_info = Self::group_rotation_info(now); - let current_window = rotation_info.last_rotation_at() + config.paras_availability_period; + let current_window = + rotation_info.last_rotation_at() + config.scheduler_params.paras_availability_period; now < current_window } @@ -488,7 +488,7 @@ impl Pallet { ) -> GroupRotationInfo> { let session_start_block = Self::session_start_block(); let group_rotation_frequency = - >::config().group_rotation_frequency; + >::config().scheduler_params.group_rotation_frequency; GroupRotationInfo { session_start_block, now, group_rotation_frequency } } @@ -508,6 +508,8 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it times out. 
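+ /// (With `AssignmentProviderConfig` removed, the retry limit is read from
+ /// `scheduler_params.max_availability_timeouts` in the host configuration.)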
pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { + let max_availability_timeouts = + >::config().scheduler_params.max_availability_timeouts; Self::next_up_on_available(core).or_else(|| { // Or, if none, the claim currently occupying the core, // as it would be put back on the queue after timing out if number of retries is not at @@ -515,16 +517,12 @@ impl Pallet { let cores = AvailabilityCores::::get(); cores.get(core.0 as usize).and_then(|c| match c { CoreOccupied::Free => None, - CoreOccupied::Paras(pe) => { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core); - + CoreOccupied::Paras(pe) => if pe.availability_timeouts < max_availability_timeouts { Some(Self::paras_entry_to_scheduled_core(pe)) } else { None - } - }, + }, }) }) } @@ -566,7 +564,7 @@ impl Pallet { // ClaimQueue related functions // fn claimqueue_lookahead() -> u32 { - >::config().scheduling_lookahead + >::config().scheduler_params.lookahead } /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. @@ -585,15 +583,15 @@ impl Pallet { let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); - let ttl = >::config().on_demand_ttl; + let config = >::config(); + let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; + let ttl = config.scheduler_params.ttl; for core_idx in 0..n_session_cores { let core_idx = CoreIndex::from(core_idx); // add previously timedout paras back into the queue if let Some(mut entry) = timedout_paras.remove(&core_idx) { - let AssignmentProviderConfig { max_availability_timeouts, .. } = - T::AssignmentProvider::get_provider_config(core_idx); if entry.availability_timeouts < max_availability_timeouts { // Increment the timeout counter. entry.availability_timeouts += 1; @@ -668,13 +666,6 @@ impl Pallet { .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) } - #[cfg(any(feature = "runtime-benchmarks", test))] - pub(crate) fn assignment_provider_config( - core_idx: CoreIndex, - ) -> AssignmentProviderConfig> { - T::AssignmentProvider::get_provider_config(core_idx) - } - #[cfg(any(feature = "try-runtime", test))] fn claimqueue_len() -> usize { ClaimQueue::::get().iter().map(|la_vec| la_vec.1.len()).sum() diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 2eb73385803..66a4e6d30be 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -48,22 +48,10 @@ impl Assignment { } } -#[derive(Encode, Decode, TypeInfo)] -/// A set of variables required by the scheduler in order to operate. -pub struct AssignmentProviderConfig { - /// How many times a collation can time out on availability. - /// Zero timeouts still means that a collation can be provided as per the slot auction - /// assignment provider. - pub max_availability_timeouts: u32, - - /// How long the collator has to provide a collation to the backing group before being dropped. - pub ttl: BlockNumber, -} - pub trait AssignmentProvider { /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. /// - /// This is where assignments come into existance. + /// This is where assignments come into existence. fn pop_assignment_for_core(core_idx: CoreIndex) -> Option; /// A previously popped `Assignment` has been fully processed. 
@@ -77,14 +65,11 @@ pub trait AssignmentProvider { /// Push back a previously popped assignment. /// /// If the assignment could not be processed within the current session, it can be pushed back - /// to the assignment provider in order to be poppped again later. + /// to the assignment provider in order to be popped again later. /// /// This is the second way the life of an assignment can come to an end. fn push_back_assignment(assignment: Assignment); - /// Returns a set of variables needed by the scheduler - fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; - /// Push some assignment for mocking/benchmarks purposes. /// /// Useful for benchmarks and testing. The returned assignment is "valid" and can if need be diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 9af23ce64bd..28b3a6b4f5d 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -18,7 +18,9 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use primitives::{ + vstaging::SchedulerParams, BlockNumber, SessionIndex, ValidationCode, ValidatorId, +}; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ @@ -103,15 +105,19 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - coretime_cores: 3, - group_rotation_frequency: 10, - paras_availability_period: 3, - scheduling_lookahead: 2, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. It requires that // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and // `thread_availability_period`. minimum_validation_upgrade_delay: 6, + scheduler_params: SchedulerParams { + group_rotation_frequency: 10, + paras_availability_period: 3, + lookahead: 2, + num_cores: 3, + max_availability_timeouts: 1, + ..Default::default() + }, ..Default::default() } } @@ -155,7 +161,7 @@ fn scheduled_entries() -> impl Iterator(vec![para_a])); } else { assert!(!claimqueue_contains_para_ids::(vec![para_a])); @@ -822,7 +825,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { assert!(!availability_cores_contains_para_ids::(vec![para_a])); // #25 - now += max_retries + 2; + now += max_timeouts + 2; // Add assignment back to the mix. MockAssigner::add_test_assignment(assignment_a.clone()); @@ -833,7 +836,7 @@ fn on_demand_claims_are_pruned_after_timing_out() { // #26 now += 1; // Run to block #n but this time have group 1 conclude the availabilty. - for n in now..=(now + max_retries + 1) { + for n in now..=(now + max_timeouts + 1) { // #n run_to_block(n, |_| None); // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. @@ -874,10 +877,8 @@ fn on_demand_claims_are_pruned_after_timing_out() { fn availability_predicate_works() { let genesis_config = genesis_config(&default_config()); - let HostConfiguration { group_rotation_frequency, paras_availability_period, .. } = - default_config(); - - assert!(paras_availability_period < group_rotation_frequency); + let SchedulerParams { group_rotation_frequency, paras_availability_period, .. 
} = + default_config().scheduler_params; new_test_ext(genesis_config).execute_with(|| { run_to_block(1 + paras_availability_period, |_| None); @@ -1044,7 +1045,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { #[test] fn session_change_requires_reschedule_dropping_removed_paras() { let mut config = default_config(); - config.scheduling_lookahead = 1; + config.scheduler_params.lookahead = 1; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); @@ -1056,7 +1057,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { new_test_ext(genesis_config).execute_with(|| { // Setting explicit core count MockAssigner::set_core_count(5); - let assignment_provider_ttl = MockAssigner::get_provider_config(CoreIndex::from(0)).ttl; + let coretime_ttl = >::config().scheduler_params.ttl; schedule_blank_para(para_a); schedule_blank_para(para_b); @@ -1120,7 +1121,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At end of block 2 - assignment_provider_ttl + 2 + coretime_ttl + 2 )] .into_iter() .collect() @@ -1169,7 +1170,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_a), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() @@ -1179,7 +1180,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ParasEntry::new( Assignment::Bulk(para_b), // At block 3 - assignment_provider_ttl + 3 + coretime_ttl + 3 )] .into_iter() .collect() diff --git a/polkadot/runtime/parachains/src/session_info/tests.rs b/polkadot/runtime/parachains/src/session_info/tests.rs index 92a50575ded..a5bfeae0745 100644 --- a/polkadot/runtime/parachains/src/session_info/tests.rs +++ b/polkadot/runtime/parachains/src/session_info/tests.rs @@ -25,7 +25,7 @@ use crate::{ util::take_active_subset, }; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, ValidatorId, ValidatorIndex}; +use primitives::{vstaging::SchedulerParams, BlockNumber, ValidatorId, ValidatorIndex}; fn run_to_block( to: BlockNumber, @@ -62,9 +62,9 @@ fn run_to_block( fn default_config() -> HostConfiguration { HostConfiguration { - coretime_cores: 1, dispute_period: 2, needed_approvals: 3, + scheduler_params: SchedulerParams { num_cores: 1, ..Default::default() }, ..Default::default() } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 173301e1ad9..f3c0c3d6bd8 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1658,6 +1658,7 @@ pub mod migrations { parachains_configuration::migration::v11::MigrateToV11, // This needs to come after the `parachains_configuration` above as we are reading the configuration. coretime::migration::MigrateToCoretime, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 34541b83597..ca0575cb1b6 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -58,8 +58,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_192_000, 0) + // Minimum execution time: 7_789_000 picoseconds. + Weight::from_parts(8_269_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +74,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_819_000 picoseconds. - Weight::from_parts(8_004_000, 0) + // Minimum execution time: 7_851_000 picoseconds. + Weight::from_parts(8_152_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +90,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_760_000 picoseconds. - Weight::from_parts(8_174_000, 0) + // Minimum execution time: 7_960_000 picoseconds. + Weight::from_parts(8_276_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +116,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_814_000 picoseconds. - Weight::from_parts(8_098_000, 0) + // Minimum execution time: 7_912_000 picoseconds. + Weight::from_parts(8_164_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +132,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_028_000 picoseconds. - Weight::from_parts(10_386_000, 0) + // Minimum execution time: 9_782_000 picoseconds. + Weight::from_parts(10_373_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +148,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_867_000 picoseconds. - Weight::from_parts(8_191_000, 0) + // Minimum execution time: 7_870_000 picoseconds. + Weight::from_parts(8_274_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +164,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_158_000 picoseconds. 
- Weight::from_parts(10_430_000, 0) + // Minimum execution time: 9_960_000 picoseconds. + Weight::from_parts(10_514_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_913_000 picoseconds. + Weight::from_parts(8_338_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 91047d953f5..a7772303a95 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1699,6 +1699,7 @@ pub mod migrations { // Migrate Identity pallet for Usernames pallet_identity::migration::versioned::V0ToV1, parachains_configuration::migration::v11::MigrateToV11, + parachains_configuration::migration::v12::MigrateToV12, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, // Migrate from legacy lease to coretime. Needs to run after configuration v11 diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 3a4813b667c..8fa3207c644 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -58,8 +58,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_065_000 picoseconds. - Weight::from_parts(8_389_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(8_036_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +74,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 8_038_000 picoseconds. 
- Weight::from_parts(8_463_000, 0) + // Minimum execution time: 7_708_000 picoseconds. + Weight::from_parts(7_971_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +90,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_843_000 picoseconds. - Weight::from_parts(8_216_000, 0) + // Minimum execution time: 7_746_000 picoseconds. + Weight::from_parts(8_028_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +116,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_969_000 picoseconds. - Weight::from_parts(8_362_000, 0) + // Minimum execution time: 7_729_000 picoseconds. + Weight::from_parts(7_954_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +132,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_084_000 picoseconds. - Weight::from_parts(10_451_000, 0) + // Minimum execution time: 9_871_000 picoseconds. + Weight::from_parts(10_075_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +148,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_948_000 picoseconds. - Weight::from_parts(8_268_000, 0) + // Minimum execution time: 7_869_000 picoseconds. + Weight::from_parts(8_000_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +164,24 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 10_257_000 picoseconds. - Weight::from_parts(10_584_000, 0) + // Minimum execution time: 9_797_000 picoseconds. + Weight::from_parts(10_373_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_718_000 picoseconds. 
+ Weight::from_parts(7_984_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index f6bdfeb4877..2561661de1f 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 5 needed_approvals = 8 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 5 + [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] max_approval_coalesce_count = 5 diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml index 5d6f299d461..a2a2621f842 100644 --- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml +++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml @@ -2,8 +2,10 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtimeGenesis.patch.configuration.config] +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] max_validators_per_core = 1 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] needed_approvals = 2 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml index e2fbec079b1..a3bbc82e74b 100644 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml +++ b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml @@ -3,8 +3,10 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 group_rotation_frequency = 2 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml index bef54cb8ca4..858f87b9cfe 100644 --- a/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml +++ b/polkadot/zombienet_tests/functional/0006-parachains-max-tranche0.toml @@ -3,10 +3,12 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 7 relay_vrf_modulo_samples = 5 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml index 69eb0804d8c..573ccf96138 100644 --- a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 1 
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml index 1ea385c3a42..ea1c93a1403 100644 --- a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml @@ -2,9 +2,11 @@ timeout = 1000 [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 1 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml index 6701d60d74d..c9d79c5f8f2 100644 --- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml +++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml @@ -3,8 +3,10 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 1 needed_approvals = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 group_rotation_frequency = 10 [relaychain] diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml index 5a6832b149b..b776622fdce 100644 --- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml +++ b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml @@ -8,13 +8,16 @@ chain = "rococo-local" [relaychain.genesis.runtimeGenesis.patch.configuration.config] needed_approvals = 4 relay_vrf_modulo_samples = 6 - scheduling_lookahead = 2 - group_rotation_frequency = 4 [relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] max_candidate_depth = 3 allowed_ancestry_len = 2 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + lookahead = 2 + group_rotation_frequency = 4 + + [relaychain.default_resources] limits = { memory = "4G", cpu = "2" } requests = { memory = "2G", cpu = "1" } diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml index 0dfd814e10a..9b3576eaa3c 100644 --- a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml @@ -3,9 +3,11 @@ timeout = 1000 bootnode = true [relaychain.genesis.runtimeGenesis.patch.configuration.config] - max_validators_per_core = 2 needed_approvals = 4 - coretime_cores = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + num_cores = 2 [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml index 399f848d3ac..b3ce2081b11 100644 --- a/polkadot/zombienet_tests/misc/0001-paritydb.toml +++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml @@ -3,9 +3,11 @@ timeout = 1000 bootnode = true 
[relaychain.genesis.runtimeGenesis.patch.configuration.config]
- max_validators_per_core = 1
  needed_approvals = 3

+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+ max_validators_per_core = 1
+
[relaychain]
default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
chain = "rococo-local"
--
GitLab


From 761937ecdc8f208361ae2a1154e0f1e6ecf57429 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Thu, 29 Feb 2024 10:21:00 +0200
Subject: [PATCH 23/86] Fix accidental no-shows on node restart (#3277)

If an approval was in progress when the node restarted, we did not actually
restart it, so we ended up distributing our assignment without ever
distributing the corresponding approval.

---------

Signed-off-by: Alexandru Gheorghe
---
 polkadot/node/core/approval-voting/src/lib.rs |  85 +-
 .../approval-voting/src/persisted_entries.rs  |   7 +
 .../node/core/approval-voting/src/tests.rs    | 744 +++++++++++++++++-
 3 files changed, 780 insertions(+), 56 deletions(-)

diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 456ae319787..8cc16a6e1ec 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -1253,13 +1253,20 @@ async fn handle_actions(
} = + match get_extended_session_info( + session_info_provider, + ctx.sender(), + block_entry.block_hash(), + block_entry.session(), + ) + .await + { + Some(i) => i, + None => continue, + }; + + actions.push(Action::LaunchApproval { + claimed_candidate_indices: bitfield, + candidate_hash: candidate_entry + .candidate_receipt() + .hash(), + indirect_cert, + assignment_tranche: assignment.tranche(), + relay_block_hash: block_hash, + session: block_entry.session(), + executor_params: executor_params.clone(), + candidate: candidate_entry + .candidate_receipt() + .clone(), + backing_group: approval_entry.backing_group(), + distribute_assignment: false, + }); + } + }, Err(err) => { // Should never happen. If we fail here it means the // assignment is null (no cores claimed). @@ -1496,7 +1551,7 @@ fn distribution_messages_for_activation( } messages[0] = ApprovalDistributionMessage::NewBlocks(approval_meta); - Ok(messages) + Ok((messages, actions)) } // Handle an incoming signal from the overseer. Returns true if execution should conclude. diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index ef47bdb2213..b924a1b52cc 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -588,6 +588,13 @@ impl BlockEntry { !self.candidates_pending_signature.is_empty() } + /// Returns true if candidate hash is in the queue for a signature. + pub fn candidate_is_pending_signature(&self, candidate_hash: CandidateHash) -> bool { + self.candidates_pending_signature + .values() + .any(|context| context.candidate_hash == candidate_hash) + } + /// Candidate hashes for candidates pending signatures fn candidate_hashes_pending_signature(&self) -> Vec { self.candidates_pending_signature diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 9220e84a255..c9053232a4c 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -78,6 +78,7 @@ struct TestSyncOracle { struct TestSyncOracleHandle { done_syncing_receiver: oneshot::Receiver<()>, + is_major_syncing: Arc, } impl TestSyncOracleHandle { @@ -108,8 +109,9 @@ impl SyncOracle for TestSyncOracle { fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHandle) { let (tx, rx) = oneshot::channel(); let flag = Arc::new(AtomicBool::new(val)); - let oracle = TestSyncOracle { flag, done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; - let handle = TestSyncOracleHandle { done_syncing_receiver: rx }; + let oracle = + TestSyncOracle { flag: flag.clone(), done_syncing_sender: Arc::new(Mutex::new(Some(tx))) }; + let handle = TestSyncOracleHandle { done_syncing_receiver: rx, is_major_syncing: flag }; (Box::new(oracle), handle) } @@ -465,6 +467,7 @@ struct HarnessConfigBuilder { clock: Option, backend: Option, assignment_criteria: Option>, + major_syncing: bool, } impl HarnessConfigBuilder { @@ -476,9 +479,19 @@ impl HarnessConfigBuilder { self } + pub fn major_syncing(&mut self, value: bool) -> &mut Self { + self.major_syncing = value; + self + } + + pub fn backend(&mut self, store: TestStore) -> &mut Self { + self.backend = Some(store); + self + } + pub fn build(&mut self) -> HarnessConfig { let (sync_oracle, sync_oracle_handle) = - self.sync_oracle.take().unwrap_or_else(|| make_sync_oracle(false)); + self.sync_oracle.take().unwrap_or_else(|| make_sync_oracle(self.major_syncing)); let 
assignment_criteria = self .assignment_criteria @@ -736,11 +749,13 @@ struct BlockConfig { slot: Slot, candidates: Option>, session_info: Option, + end_syncing: bool, } struct ChainBuilder { blocks_by_hash: HashMap, blocks_at_height: BTreeMap>, + is_major_syncing: Arc, } impl ChainBuilder { @@ -748,16 +763,28 @@ impl ChainBuilder { const GENESIS_PARENT_HASH: Hash = Hash::repeat_byte(0x00); pub fn new() -> Self { - let mut builder = - Self { blocks_by_hash: HashMap::new(), blocks_at_height: BTreeMap::new() }; + let mut builder = Self { + blocks_by_hash: HashMap::new(), + blocks_at_height: BTreeMap::new(), + is_major_syncing: Arc::new(AtomicBool::new(false)), + }; builder.add_block_inner( Self::GENESIS_HASH, Self::GENESIS_PARENT_HASH, 0, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ); builder } + pub fn major_syncing(&mut self, major_syncing: Arc) -> &mut Self { + self.is_major_syncing = major_syncing; + self + } pub fn add_block( &mut self, @@ -808,8 +835,16 @@ impl ChainBuilder { } ancestry.reverse(); - import_block(overseer, ancestry.as_ref(), *number, block_config, false, i > 0) - .await; + import_block( + overseer, + ancestry.as_ref(), + *number, + block_config, + false, + i > 0, + self.is_major_syncing.clone(), + ) + .await; let _: Option<()> = future::pending().timeout(Duration::from_millis(100)).await; } } @@ -863,6 +898,7 @@ async fn import_block( config: &BlockConfig, gap: bool, fork: bool, + major_syncing: Arc, ) { let (new_head, new_header) = &hashes[hashes.len() - 1]; let candidates = config.candidates.clone().unwrap_or(vec![( @@ -891,6 +927,12 @@ async fn import_block( h_tx.send(Ok(Some(new_header.clone()))).unwrap(); } ); + + let is_major_syncing = major_syncing.load(Ordering::SeqCst); + if config.end_syncing { + major_syncing.store(false, Ordering::SeqCst); + } + if !fork { let mut _ancestry_step = 0; if gap { @@ -931,7 +973,7 @@ async fn import_block( } } - if number > 0 { + if number > 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -944,7 +986,6 @@ async fn import_block( c_tx.send(Ok(inclusion_events)).unwrap(); } ); - assert_matches!( overseer_recv(overseer).await, AllMessages::RuntimeApi( @@ -984,14 +1025,14 @@ async fn import_block( ); } - if number == 0 { + if number == 0 && !is_major_syncing { assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks(v)) => { assert_eq!(v.len(), 0usize); } ); - } else { + } else if number > 0 && !is_major_syncing { if !fork { // SessionInfo won't be called for forks - it's already cached assert_matches!( @@ -1031,20 +1072,23 @@ async fn import_block( ); } - assert_matches!( - overseer_recv(overseer).await, - AllMessages::ApprovalDistribution( - ApprovalDistributionMessage::NewBlocks(mut approval_vec) - ) => { - assert_eq!(approval_vec.len(), 1); - let metadata = approval_vec.pop().unwrap(); - let hash = &hashes[number as usize]; - let parent_hash = &hashes[(number - 1) as usize]; - assert_eq!(metadata.hash, hash.0.clone()); - assert_eq!(metadata.parent_hash, parent_hash.0.clone()); - assert_eq!(metadata.slot, config.slot); - } - ); + if !is_major_syncing { + assert_matches!( + overseer_recv(overseer).await, + + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::NewBlocks(mut approval_vec) + ) => { + assert_eq!(approval_vec.len(), 1); + let metadata = 
approval_vec.pop().unwrap(); + let hash = &hashes[number as usize]; + let parent_hash = &hashes[(number - 1) as usize]; + assert_eq!(metadata.hash, hash.0.clone()); + assert_eq!(metadata.parent_hash, parent_hash.0.clone()); + assert_eq!(metadata.slot, config.slot); + } + ); + } } } @@ -1072,7 +1116,7 @@ fn subsystem_rejects_bad_assignment_ok_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1135,7 +1179,7 @@ fn subsystem_rejects_bad_assignment_err_criteria() { block_hash, head, 1, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); builder.build(&mut virtual_overseer).await; @@ -1240,6 +1284,7 @@ fn subsystem_rejects_approval_if_no_candidate_entry() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(1), GroupIndex(1))]), session_info: None, + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -1345,7 +1390,12 @@ fn subsystem_rejects_approval_before_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1398,7 +1448,12 @@ fn subsystem_rejects_assignment_in_future() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(0), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(0), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1472,6 +1527,7 @@ fn subsystem_accepts_duplicate_assignment() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1537,7 +1593,12 @@ fn subsystem_rejects_assignment_with_unknown_candidate() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1582,7 +1643,12 @@ fn subsystem_rejects_oversized_bitfields() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1650,7 +1716,12 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1741,6 +1812,7 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { slot: Slot::from(0), candidates: None, session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -1824,7 +1896,12 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + 
end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1873,7 +1950,12 @@ fn subsystem_process_wakeup_schedules_wakeup() { block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot: Slot::from(1), candidates: None, session_info: None }, + BlockConfig { + slot: Slot::from(1), + candidates: None, + session_info: None, + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -1925,7 +2007,7 @@ fn linear_import_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1983,7 +2065,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, i, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); head = hash; } @@ -1997,7 +2079,7 @@ fn forkful_import_at_same_height_act_on_leaf() { hash, head, session, - BlockConfig { slot, candidates: None, session_info: None }, + BlockConfig { slot, candidates: None, session_info: None, end_syncing: false }, ); } builder.build(&mut virtual_overseer).await; @@ -2168,6 +2250,7 @@ fn import_checked_approval_updates_entries_and_schedules() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -2323,6 +2406,7 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { (candidate_receipt2, CoreIndex(1), GroupIndex(1)), ]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2445,6 +2529,7 @@ fn approved_ancestor_test( slot: Slot::from(i as u64), candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(0))]), session_info: None, + end_syncing: false, }, ); } @@ -2623,13 +2708,19 @@ fn subsystem_validate_approvals_cache() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .add_block( fork_block_hash, ChainBuilder::GENESIS_HASH, 1, - BlockConfig { slot, candidates, session_info: Some(session_info) }, + BlockConfig { + slot, + candidates, + session_info: Some(session_info), + end_syncing: false, + }, ) .build(&mut virtual_overseer) .await; @@ -2740,6 +2831,7 @@ fn subsystem_doesnt_distribute_duplicate_compact_assignments() { (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), ]), session_info: None, + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -2997,6 +3089,7 @@ where slot, candidates: Some(vec![(candidate_receipt, CoreIndex(0), GroupIndex(2))]), session_info: Some(session_info), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3314,6 +3407,7 @@ fn pre_covers_dont_stall_approval() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3491,6 +3585,7 @@ fn waits_until_approving_assignments_are_old_enough() { slot, candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), session_info: Some(session_info), + end_syncing: false, }, ); builder.build(&mut virtual_overseer).await; @@ -3705,6 +3800,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -3943,8 +4039,7 @@ fn 
test_approval_is_sent_on_max_approval_coalesce_wait() { let store = config.backend(); test_harness(config, |test_harness| async move { - let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = - test_harness; + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _ } = test_harness; assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -4006,6 +4101,7 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { slot, candidates: candidates.clone(), session_info: Some(session_info.clone()), + end_syncing: false, }, ) .build(&mut virtual_overseer) @@ -4040,3 +4136,569 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { virtual_overseer }); } + +// Builds a chain with a fork where both relay blocks include the same candidate. +async fn build_chain_with_two_blocks_with_one_candidate_each( + block_hash1: Hash, + block_hash2: Hash, + slot: Slot, + sync_oracle_handle: TestSyncOracleHandle, + candidate_receipt: CandidateReceipt, +) -> (ChainBuilder, SessionInfo) { + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![(candidate_receipt.clone(), CoreIndex(0), GroupIndex(0))]); + let mut chain_builder = ChainBuilder::new(); + + chain_builder + .major_syncing(sync_oracle_handle.is_major_syncing.clone()) + .add_block( + block_hash1, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + end_syncing: false, + }, + ) + .add_block( + block_hash2, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates, + session_info: Some(session_info.clone()), + end_syncing: true, + }, + ); + (chain_builder, session_info) +} + +async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + virtual_overseer: &mut VirtualOverseer, + store: TestStore, + clock: &Box, + sync_oracle_handle: TestSyncOracleHandle, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = 
store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); +} + +// Tests that for candidates that we did not approve yet, for which we triggered the assignment and +// the approval work we restart the work to approve it. +#[test] +fn subsystem_relaunches_approval_work_on_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + // Bail early after the assignment has been distributed but before we answer with the mocked + // approval from CandidateValidation. + virtual_overseer + }); + + // Restart a new approval voting subsystem with the same database and major syncing true untill + // the last leaf. 
+ let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + // On major syncing ending Approval voting should send all the necessary messages for a + // candidate to be approved. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + // Guarantees the approval work has been relaunched. + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. 
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + +// Test that cached approvals, which are candidates that we approved but we didn't issue +// the signature yet because we want to coalesce it with more candidate are sent after restart. +#[test] +fn subsystem_sends_pending_approvals_on_approval_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_two_blocks_each_with_one_assignment_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. 
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + // Configure a big coalesce number, so that the signature is cached instead of being sent to + // approval-distribution. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 6, + })); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); + + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + // On restart signatures should be sent to approval-distribution without relaunching the + // approval work. + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let fork_block_hash = Hash::repeat_byte(0x02); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let slot = Slot::from(1); + + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each( + block_hash, + fork_block_hash, + slot, + sync_oracle_handle, + candidate_receipt, + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionInfo(_, si_tx), + ) + ) => { + si_tx.send(Ok(Some(session_info.clone()))).unwrap(); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(_, si_tx), + ) + ) => { + // Make sure all SessionExecutorParams calls are not made 
for the leaf (but for its relay parent) + si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), ) + ) => { + si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} -- GitLab From c244a94e4a77c0798c5a45d3004ec2389378b394 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 29 Feb 2024 11:41:11 +0000 Subject: [PATCH 24/86] update development setup in sdk-docs (#3506) Co-authored-by: Liam Aharon --- .../development_environment_advice.rs | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index 43176959793..21bbe78836c 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -111,3 +111,74 @@ //! If you have a powerful remote server available, you may consider using //! [cargo-remote](https://github.com/sgeisler/cargo-remote) to execute cargo commands on it, //! freeing up local resources for other tasks like `rust-analyzer`. +//! +//! When using `cargo-remote`, you can configure your editor to perform the the typical +//! "check-on-save" remotely as well. The configuration for VSCode is as follows: +//! +//! ```json +//! { +//! "rust-analyzer.cargo.buildScripts.overrideCommand": [ +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! "rust-analyzer.check.overrideCommand": [ +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! ], +//! } +//! ``` +//! +//! //! and the same in Lua for `neovim/nvim-lspconfig`: +//! +//! ```lua +//! ["rust-analyzer"] = { +//! cargo = { +//! buildScripts = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! "--target-dir=target/rust-analyzer" +//! }, +//! }, +//! check = { +//! overrideCommand = { +//! "cargo", +//! "remote", +//! "--build-env", +//! "SKIP_WASM_BUILD=1", +//! "--", +//! "check", +//! "--workspace", +//! "--message-format=json", +//! "--all-targets", +//! "--all-features", +//! 
"--target-dir=target/rust-analyzer" +//! }, +//! }, +//! }, +//! }, +//! ``` -- GitLab From 7f5d308d2983dcce324a8625edf3f2a838209da8 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Thu, 29 Feb 2024 15:32:58 +0200 Subject: [PATCH 25/86] Enable elastic scaling node feature in local testnets genesis (#3509) Signed-off-by: Andrei Sandu --- Cargo.lock | 1 + polkadot/node/service/Cargo.toml | 5 ++- polkadot/node/service/src/chain_spec.rs | 7 +++- polkadot/runtime/parachains/src/builder.rs | 24 ++++++++---- .../functional/0012-elastic-scaling-mvp.zndsl | 11 +----- .../functional/0012-enable-node-feature.js | 37 ------------------- 6 files changed, 27 insertions(+), 58 deletions(-) delete mode 100644 polkadot/zombienet_tests/functional/0012-enable-node-feature.js diff --git a/Cargo.lock b/Cargo.lock index 3838ed0e94a..991541ee84e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13526,6 +13526,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", + "bitvec", "env_logger 0.9.3", "frame-benchmarking", "frame-benchmarking-cli", diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 8fd9f20b7bc..734dcbdeb44 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -93,6 +93,7 @@ kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.12", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } parking_lot = "0.12.1" +bitvec = { version = "1.0.1", optional = true } # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } @@ -184,8 +185,8 @@ full-node = [ ] # Configure the native runtimes to use. -westend-native = ["westend-runtime", "westend-runtime-constants"] -rococo-native = ["rococo-runtime", "rococo-runtime-constants"] +westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"] +rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index af05af87a46..1c44b17b6fd 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -122,7 +122,9 @@ pub fn wococo_config() -> Result { fn default_parachains_host_configuration( ) -> polkadot_runtime_parachains::configuration::HostConfiguration { - use polkadot_primitives::{AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE}; + use polkadot_primitives::{ + vstaging::node_features::FeatureIndex, AsyncBackingParams, MAX_CODE_SIZE, MAX_POV_SIZE, + }; polkadot_runtime_parachains::configuration::HostConfiguration { validation_upgrade_cooldown: 2u32, @@ -155,6 +157,9 @@ fn default_parachains_host_configuration( max_candidate_depth: 3, allowed_ancestry_len: 2, }, + node_features: bitvec::vec::BitVec::from_element( + 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + ), scheduler_params: SchedulerParams { lookahead: 2, group_rotation_frequency: 20, diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 0e4e659fef2..5b218addb1a 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -25,12 +25,13 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, - 
CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, - CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, - Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, - PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, - ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, + collator_signature_payload, vstaging::node_features::FeatureIndex, AvailabilityBitfield, + BackedCandidate, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, CoreIndex, DisputeStatement, + DisputeStatementSet, GroupIndex, HeadData, Id as ParaId, IndexedVec, + InherentData as ParachainsInherentData, InvalidDisputeStatementKind, PersistedValidationData, + SessionIndex, SigningContext, UncheckedSigned, ValidDisputeStatementKind, ValidationCode, + ValidatorId, ValidatorIndex, ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -509,7 +510,7 @@ impl BenchBuilder { .iter() .map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); - let (para_id, _core_idx, group_idx) = self.create_indexes(*seed); + let (para_id, core_idx, group_idx) = self.create_indexes(*seed); // This generates a pair and adds it to the keystore, returning just the public. let collator_public = CollatorId::generate_pair(None); @@ -586,11 +587,18 @@ impl BenchBuilder { }) .collect(); + // Check if the elastic scaling bit is set, if so we need to supply the core index + // in the generated candidate. + let core_idx = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|_the_bit| core_idx); + BackedCandidate::::new( candidate, validity_votes, bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], - None, + core_idx, ) }) .collect() diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl index a7193c9282b..edc87c802a0 100644 --- a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl @@ -1,4 +1,4 @@ -Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled +Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled in genesis Network: ./0012-elastic-scaling-mvp.toml Creds: config @@ -15,14 +15,5 @@ alice: js-script ./0012-register-para.js return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds -# Parachain will now be stalled -validator: parachain 2000 block height is lower than 20 within 300 seconds - -# Enable the ElasticScalingMVP node feature. -alice: js-script ./0012-enable-node-feature.js with "1" return is 0 within 600 seconds - -# Wait two sessions for the config to be updated. -sleep 120 seconds - # Ensure parachain is now making progress. 
validator: parachain 2000 block height is at least 30 within 200 seconds diff --git a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js deleted file mode 100644 index 4822e1f6644..00000000000 --- a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js +++ /dev/null @@ -1,37 +0,0 @@ -async function run(nodeName, networkInfo, index) { - const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - await zombie.util.cryptoWaitReady(); - - // account to submit tx - const keyring = new zombie.Keyring({ type: "sr25519" }); - const alice = keyring.addFromUri("//Alice"); - - await new Promise(async (resolve, reject) => { - const unsub = await api.tx.sudo - .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) - .signAndSend(alice, ({ status, isError }) => { - if (status.isInBlock) { - console.log( - `Transaction included at blockhash ${status.asInBlock}`, - ); - } else if (status.isFinalized) { - console.log( - `Transaction finalized at blockHash ${status.asFinalized}`, - ); - unsub(); - return resolve(); - } else if (isError) { - console.log(`Transaction error`); - reject(`Transaction error`); - } - }); - }); - - - - return 0; -} - -module.exports = { run }; -- GitLab From c0e52a9ed61451152c4e096c58f24fb4e329bd64 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 29 Feb 2024 19:08:08 +0000 Subject: [PATCH 26/86] Fix call enum's metadata regression (#3513) This fixes an issue introduced in https://github.com/paritytech/substrate/pull/14101, in which I removed the `Call` enum's documentation and replaced it with a link to the `Pallet` struct, but this also removed any docs related to call from the metadata. I tried to add a regression test for this, but it seems to me that this is not possible, given that using `type-info` we only assert in type-ids for `Call`, `Event` and `Error`. I removed some doc comments from a test setup in `frame-support-test` to demonstrate the issue there. @jsdw do you have any comments on this? I also fixed a small issue in the custom html/css of `polkadot-sdk-doc` crate, making sure it does not affect the rust-doc page of all other crates. - [x] Investigate a regression test - [x] prdoc --- docs/sdk/headers/header.html | 111 +++++++++--------- docs/sdk/headers/theme.css | 25 ++-- prdoc/pr_3513.prdoc | 18 +++ substrate/frame/support/Cargo.toml | 21 +++- .../procedural/src/pallet/expand/call.rs | 21 +--- substrate/frame/support/test/tests/pallet.rs | 64 +++++++--- 6 files changed, 157 insertions(+), 103 deletions(-) create mode 100644 prdoc/pr_3513.prdoc diff --git a/docs/sdk/headers/header.html b/docs/sdk/headers/header.html index 68dc5d5509e..e28458c4ccc 100644 --- a/docs/sdk/headers/header.html +++ b/docs/sdk/headers/header.html @@ -54,7 +54,7 @@ sidebarElems.style.display = 'none'; // Add click event listener to the button - expandButton.addEventListener('click', function() { + expandButton.addEventListener('click', function () { // Toggle the display of the '.sidebar-elems' if (sidebarElems.style.display === 'none') { sidebarElems.style.display = 'block'; @@ -72,6 +72,9 @@ if (!crate_name.textContent.startsWith("polkadot_sdk_docs")) { console.log("skipping -- not `polkadot_sdk_docs`"); return; + } else { + // insert class 'sdk-docs' to the body, so it enables the custom css rules. 
+ document.body.classList.add("sdk-docs"); } createToC(); @@ -82,58 +85,60 @@ diff --git a/docs/sdk/headers/theme.css b/docs/sdk/headers/theme.css index bb9254ec4a8..a488e15c36b 100644 --- a/docs/sdk/headers/theme.css +++ b/docs/sdk/headers/theme.css @@ -1,16 +1,17 @@ :root { - --polkadot-pink: #E6007A ; - --polkadot-green: #56F39A ; - --polkadot-lime: #D3FF33 ; - --polkadot-cyan: #00B2FF ; - --polkadot-purple: #552BBF ; - } - -body > nav.sidebar > div.sidebar-crate > a > img { - /* logo width, given that the sidebar width is 200px; */ - width: 190px; + --polkadot-pink: #E6007A; + --polkadot-green: #56F39A; + --polkadot-lime: #D3FF33; + --polkadot-cyan: #00B2FF; + --polkadot-purple: #552BBF; } -body nav.sidebar { - flex: 0 0 250px; +body.sdk-docs { + nav.sidebar>div.sidebar-crate>a>img { + width: 190px; + } + + nav.sidebar { + flex: 0 0 250px; + } } diff --git a/prdoc/pr_3513.prdoc b/prdoc/pr_3513.prdoc new file mode 100644 index 00000000000..e1f2afe280a --- /dev/null +++ b/prdoc/pr_3513.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix call enum's metadata regression + +doc: + - audience: Runtime Dev + - audience: Runtime User + description: | + This PR fixes an issue where in the metadata of all FRAME-based chains, the documentation for + all extrinsic/call/transactions has been replaced by a single line saying "see Pallet::name". + + Many wallets display the metadata of transactions to the user before signing, and this bug + might have affected this process. + +crates: + - name: frame + - name: frame-support diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 72e2d0bfa56..c19f478c803 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,13 +18,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../primitives/api", default-features = false, features = ["frame-metadata"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame-metadata = { version = "16.0.0", default-features = false, features = [ + "current", +] } +sp-api = { path = "../../primitives/api", default-features = false, features = [ + "frame-metadata", +] } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = [ + "serde", +] } sp-tracing = { path = "../../primitives/tracing", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } diff --git 
a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index f43faba1ee0..f395872c8a8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -18,7 +18,7 @@ use crate::{ pallet::{ expand::warnings::{weight_constant_warning, weight_witness_warning}, - parse::call::{CallVariantDef, CallWeightDef}, + parse::call::CallWeightDef, Def, }, COUNTER, @@ -112,22 +112,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } debug_assert_eq!(fn_weight.len(), methods.len()); - let map_fn_docs = if !def.dev_mode { - // Emit the [`Pallet::method`] documentation only for non-dev modes. - |method: &CallVariantDef| { - let reference = format!("See [`Pallet::{}`].", method.name); - quote!(#reference) - } - } else { - // For the dev-mode do not provide a documenation link as it will break the - // `cargo doc` if the pallet is private inside a test. - |method: &CallVariantDef| { - let reference = format!("See `Pallet::{}`.", method.name); - quote!(#reference) - } - }; - - let fn_doc = methods.iter().map(map_fn_docs).collect::>(); + let fn_doc = methods.iter().map(|method| &method.docs).collect::>(); let args_name = methods .iter() @@ -309,7 +294,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { ), #( #cfg_attrs - #[doc = #fn_doc] + #( #[doc = #fn_doc] )* #[codec(index = #call_index)] #fn_name { #( diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 7bee4b7c820..607f54ccc13 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -209,7 +209,7 @@ pub mod pallet { where T::AccountId: From + From + SomeAssociation1, { - /// Doc comment put in metadata + /// call foo doc comment put in metadata #[pallet::call_index(0)] #[pallet::weight(Weight::from_parts(*foo as u64, 0))] pub fn foo( @@ -225,7 +225,7 @@ pub mod pallet { Ok(().into()) } - /// Doc comment put in metadata + /// call foo_storage_layer doc comment put in metadata #[pallet::call_index(1)] #[pallet::weight({1})] pub fn foo_storage_layer( @@ -270,7 +270,7 @@ pub mod pallet { #[pallet::error] #[derive(PartialEq, Eq)] pub enum Error { - /// doc comment put into metadata + /// error doc comment put in metadata InsufficientProposersBalance, NonExistentStorageValue, Code(u8), @@ -287,9 +287,8 @@ pub mod pallet { where T::AccountId: SomeAssociation1 + From, { - /// doc comment put in metadata + /// event doc comment put in metadata Proposed(::AccountId), - /// doc Spending(BalanceOf), Something(u32), SomethingElse(::_1), @@ -750,8 +749,7 @@ pub type UncheckedExtrinsic = sp_runtime::testing::TestXt>; frame_support::construct_runtime!( - pub struct Runtime - { + pub struct Runtime { // Exclude part `Storage` in order not to check its metadata in tests. 
System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, @@ -772,6 +770,14 @@ fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { } } +fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } +} + #[test] fn transactional_works() { TestExternalities::default().execute_with(|| { @@ -1362,19 +1368,47 @@ fn migrate_from_pallet_version_to_storage_version() { }); } +#[test] +fn pallet_item_docs_in_metadata() { + // call + let call_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(call_variants[0].docs, maybe_docs(vec!["call foo doc comment put in metadata"])); + assert_eq!( + call_variants[1].docs, + maybe_docs(vec!["call foo_storage_layer doc comment put in metadata"]) + ); + assert!(call_variants[2].docs.is_empty()); + + // event + let event_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(event_variants[0].docs, maybe_docs(vec!["event doc comment put in metadata"])); + assert!(event_variants[1].docs.is_empty()); + + // error + let error_variants = match meta_type::>().type_info().type_def { + scale_info::TypeDef::Variant(variants) => variants.variants, + _ => unreachable!(), + }; + + assert_eq!(error_variants[0].docs, maybe_docs(vec!["error doc comment put in metadata"])); + assert!(error_variants[1].docs.is_empty()); + + // storage is already covered in the main `fn metadata` test. +} + #[test] fn metadata() { use codec::Decode; use frame_metadata::{v15::*, *}; - fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { - if cfg!(feature = "no-metadata-docs") { - vec![] - } else { - doc - } - } - let readme = "Support code for the runtime.\n\nLicense: Apache-2.0\n"; let expected_pallet_doc = vec![" Pallet documentation", readme, readme]; -- GitLab From 650886683d42de45fbcb92645d988248dda6dbba Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Fri, 1 Mar 2024 08:31:48 +0100 Subject: [PATCH 27/86] Add `claim_assets` extrinsic to `pallet-xcm` (#3403) If an XCM execution fails or ends with leftover assets, these will be trapped. In order to claim them, a custom XCM has to be executed, with the `ClaimAsset` instruction. However, arbitrary XCM execution is not allowed everywhere yet and XCM itself is still not easy enough to use for users out there with trapped assets. This new extrinsic in `pallet-xcm` will allow these users to easily claim their assets, without concerning themselves with writing arbitrary XCMs. 
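For illustration, a rough usage sketch (not part of this patch): it assumes a runtime where `pallet-xcm` is instantiated as `XcmPallet`, and that the extrinsic takes the trapped assets plus a beneficiary location as boxed, versioned arguments; the exact signature is the one added to `pallet-xcm` below.

```rust
// Hypothetical sketch only: `XcmPallet`, `RuntimeOrigin` and the parameter shape
// are assumptions based on the description above, not copied from this patch.
use frame_support::dispatch::DispatchResult;
use xcm::prelude::*;

fn claim_trapped_assets(origin: RuntimeOrigin, account: [u8; 32]) -> DispatchResult {
	// The asset previously reported as trapped (e.g. via an `AssetsTrapped` event).
	let trapped: Assets =
		Asset { id: AssetId(Location::parent()), fun: Fungible(10_000_000_000) }.into();
	// Deposit whatever is successfully claimed into the caller's own local account.
	let beneficiary: Location = AccountId32 { network: None, id: account }.into();

	XcmPallet::claim_assets(
		origin,
		Box::new(VersionedAssets::from(trapped)),
		Box::new(VersionedLocation::from(beneficiary)),
	)
}
```
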
Part of fixing https://github.com/paritytech/polkadot-sdk/issues/3495 --------- Co-authored-by: command-bot <> Co-authored-by: Adrian Catangiu --- .../assets/asset-hub-rococo/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 146 ++++--- .../assets/asset-hub-westend/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 150 ++++--- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 142 +++--- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 146 ++++--- .../collectives-westend/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 142 +++--- .../contracts/contracts-rococo/src/lib.rs | 7 + .../coretime/coretime-rococo/src/lib.rs | 7 + .../coretime-rococo/src/weights/pallet_xcm.rs | 154 ++++--- .../coretime/coretime-westend/src/lib.rs | 7 + .../src/weights/pallet_xcm.rs | 94 ++-- .../runtimes/people/people-rococo/src/lib.rs | 7 + .../people-rococo/src/weights/pallet_xcm.rs | 412 +++++++++--------- .../runtimes/people/people-westend/src/lib.rs | 7 + .../people-westend/src/weights/pallet_xcm.rs | 412 +++++++++--------- polkadot/runtime/rococo/src/lib.rs | 7 + .../runtime/rococo/src/weights/pallet_xcm.rs | 230 ++++++---- polkadot/runtime/westend/src/lib.rs | 7 + .../runtime/westend/src/weights/pallet_xcm.rs | 238 ++++++---- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 21 +- polkadot/xcm/pallet-xcm/src/lib.rs | 63 +++ polkadot/xcm/pallet-xcm/src/mock.rs | 6 +- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 51 +++ polkadot/xcm/src/lib.rs | 31 +- prdoc/pr_3403.prdoc | 22 + 29 files changed, 1501 insertions(+), 1043 deletions(-) create mode 100644 prdoc/pr_3403.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 1c4f49fea0e..a64d4bc2365 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1454,6 +1454,13 @@ impl_runtime_apis! { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl XcmBridgeHubRouterConfig for Runtime { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index f8820bbb58c..51b6543bae8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_003_000 picoseconds. 
- Weight::from_parts(25_800_000, 0) + // Minimum execution time: 22_136_000 picoseconds. + Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 88_832_000 picoseconds. - Weight::from_parts(90_491_000, 0) + // Minimum execution time: 92_277_000 picoseconds. + Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 138_911_000 picoseconds. - Weight::from_parts(142_483_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) + // Minimum execution time: 143_116_000 picoseconds. + Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -170,8 +170,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_081_000 picoseconds. - Weight::from_parts(7_397_000, 0) + // Minimum execution time: 6_517_000 picoseconds. + Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_007_000 picoseconds. - Weight::from_parts(2_183_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_790_000 picoseconds. - Weight::from_parts(29_767_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_951_000 picoseconds. - Weight::from_parts(31_804_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,45 +246,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_164_000 picoseconds. 
- Weight::from_parts(2_311_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_906_000 picoseconds. - Weight::from_parts(17_612_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 17_443_000 picoseconds. - Weight::from_parts(18_032_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 19_177_000 picoseconds. + Weight::from_parts(19_704_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_992_000 picoseconds. - Weight::from_parts(19_464_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_449_000 picoseconds. + Weight::from_parts(21_075_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -304,36 +304,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 28_011_000 picoseconds. - Weight::from_parts(28_716_000, 0) + // Minimum execution time: 26_578_000 picoseconds. + Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_533_000 picoseconds. 
- Weight::from_parts(9_856_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_628_000 picoseconds. - Weight::from_parts(18_146_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_301_000 picoseconds. + Weight::from_parts(19_664_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -349,12 +349,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_877_000 picoseconds. - Weight::from_parts(35_607_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -365,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 5_370_000 picoseconds. - Weight::from_parts(5_616_000, 0) + // Minimum execution time: 4_871_000 picoseconds. + Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,10 +377,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_820_000 picoseconds. - Weight::from_parts(27_143_000, 0) + // Minimum execution time: 25_150_000 picoseconds. 
+ Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 38_248_000 picoseconds. + Weight::from_parts(39_122_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e015c7bfca3..5778ed362ee 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1526,6 +1526,13 @@ impl_runtime_apis! { }); Some((assets, fee_index as u32, dest, verify)) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use pallet_xcm_bridge_hub_router::benchmarking::{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 504731f4a9e..71facff87d3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 25_482_000 picoseconds. - Weight::from_parts(26_622_000, 0) + // Minimum execution time: 21_630_000 picoseconds. + Weight::from_parts(22_306_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 87_319_000 picoseconds. - Weight::from_parts(89_764_000, 0) + // Minimum execution time: 91_802_000 picoseconds. + Weight::from_parts(93_672_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `367` // Estimated: `6196` - // Minimum execution time: 139_133_000 picoseconds. 
- Weight::from_parts(141_507_000, 0) + // Minimum execution time: 118_930_000 picoseconds. + Weight::from_parts(122_306_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 144_241_000 picoseconds. - Weight::from_parts(149_709_000, 0) + // Minimum execution time: 140_527_000 picoseconds. + Weight::from_parts(144_501_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -158,8 +158,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_392_000 picoseconds. - Weight::from_parts(10_779_000, 0) + // Minimum execution time: 7_556_000 picoseconds. + Weight::from_parts(7_798_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -168,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_088_000 picoseconds. - Weight::from_parts(7_257_000, 0) + // Minimum execution time: 6_373_000 picoseconds. + Weight::from_parts(6_603_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +179,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_095_000 picoseconds. - Weight::from_parts(2_136_000, 0) + // Minimum execution time: 1_941_000 picoseconds. + Weight::from_parts(2_088_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_728_000 picoseconds. - Weight::from_parts(29_349_000, 0) + // Minimum execution time: 27_080_000 picoseconds. + Weight::from_parts(27_820_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +232,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_605_000 picoseconds. - Weight::from_parts(31_477_000, 0) + // Minimum execution time: 28_850_000 picoseconds. + Weight::from_parts(29_506_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,45 +244,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_137_000 picoseconds. - Weight::from_parts(2_303_000, 0) + // Minimum execution time: 2_033_000 picoseconds. 
+ Weight::from_parts(2_201_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_719_000 picoseconds. - Weight::from_parts(17_329_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_844_000 picoseconds. + Weight::from_parts(19_197_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_405_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 18_940_000 picoseconds. + Weight::from_parts(19_450_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_751_000 picoseconds. - Weight::from_parts(19_130_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 20_521_000 picoseconds. + Weight::from_parts(21_076_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -302,36 +302,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_189_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 26_007_000 picoseconds. + Weight::from_parts(26_448_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_307_000 picoseconds. 
- Weight::from_parts(9_691_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_584_000 picoseconds. + Weight::from_parts(12_080_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_607_000 picoseconds. - Weight::from_parts(18_090_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_513_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -347,12 +347,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_322_000 picoseconds. - Weight::from_parts(35_754_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 34_878_000 picoseconds. + Weight::from_parts(35_623_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -363,8 +363,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_513_000 picoseconds. - Weight::from_parts(4_754_000, 0) + // Minimum execution time: 3_900_000 picoseconds. + Weight::from_parts(4_161_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,10 +375,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_860_000 picoseconds. - Weight::from_parts(28_279_000, 0) + // Minimum execution time: 25_731_000 picoseconds. 
+ Weight::from_parts(26_160_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 37_251_000 picoseconds. + Weight::from_parts(38_075_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 0f6af1b51f1..ee472f006fa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1148,6 +1148,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index 5faded42aa8..a732e1a5734 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_683_000 picoseconds. - Weight::from_parts(24_199_000, 0) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 89_524_000 picoseconds. - Weight::from_parts(91_401_000, 0) + // Minimum execution time: 88_096_000 picoseconds. + Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 91_890_000 picoseconds. - Weight::from_parts(93_460_000, 0) + // Minimum execution time: 88_239_000 picoseconds. 
+ Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_152_000 picoseconds. - Weight::from_parts(7_355_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_081_000 picoseconds. - Weight::from_parts(2_258_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 28_067_000 picoseconds. - Weight::from_parts(28_693_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_420_000 picoseconds. - Weight::from_parts(31_373_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_087_000 picoseconds. - Weight::from_parts(2_243_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_142_000 picoseconds. - Weight::from_parts(15_598_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_718_000 picoseconds. + Weight::from_parts(18_208_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_041_000 picoseconds. 
- Weight::from_parts(15_493_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_624_000 picoseconds. - Weight::from_parts(17_031_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 26_398_000 picoseconds. - Weight::from_parts(26_847_000, 0) + // Minimum execution time: 24_958_000 picoseconds. + Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_741_000 picoseconds. - Weight::from_parts(8_954_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_306_000 picoseconds. - Weight::from_parts(15_760_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_844_000 picoseconds. 
+ Weight::from_parts(18_266_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_127_000 picoseconds. - Weight::from_parts(33_938_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_131_000 picoseconds. + Weight::from_parts(34_766_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_290_000 picoseconds. - Weight::from_parts(4_450_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_408_000 picoseconds. - Weight::from_parts(26_900_000, 0) + // Minimum execution time: 24_975_000 picoseconds. + Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_761_000 picoseconds. + Weight::from_parts(34_674_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 206743ba6ac..60d959b4053 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -843,6 +843,13 @@ impl_runtime_apis! 
{ dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 83e4260e771..a78ff2355ef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_219_000 picoseconds. - Weight::from_parts(23_818_000, 0) + // Minimum execution time: 19_527_000 picoseconds. + Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -88,10 +88,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_120_000 picoseconds. - Weight::from_parts(92_545_000, 0) + // Minimum execution time: 90_938_000 picoseconds. + Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -124,10 +124,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` + // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_339_000 picoseconds. - Weight::from_parts(93_204_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. - Weight::from_parts(7_284_000, 0) + // Minimum execution time: 6_205_000 picoseconds. 
+ Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. - Weight::from_parts(2_223_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_778_000 picoseconds. - Weight::from_parts(28_318_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 30_446_000 picoseconds. - Weight::from_parts(31_925_000, 0) + // Minimum execution time: 28_188_000 picoseconds. + Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_037_000 picoseconds. - Weight::from_parts(2_211_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 15_620_000 picoseconds. - Weight::from_parts(15_984_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(17_964_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 15_689_000 picoseconds. - Weight::from_parts(16_093_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_357_000 picoseconds. 
+ Weight::from_parts(18_006_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_946_000 picoseconds. - Weight::from_parts(17_192_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_164_000 picoseconds. - Weight::from_parts(27_760_000, 0) + // Minimum execution time: 25_517_000 picoseconds. + Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_689_000 picoseconds. - Weight::from_parts(8_874_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_944_000 picoseconds. - Weight::from_parts(16_381_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_490_000 picoseconds. 
+ Weight::from_parts(18_160_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_826_000 picoseconds. - Weight::from_parts(34_784_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 34_088_000 picoseconds. + Weight::from_parts(34_598_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_257_000 picoseconds. - Weight::from_parts(4_383_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_924_000 picoseconds. - Weight::from_parts(27_455_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_661_000 picoseconds. + Weight::from_parts(35_411_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 3ed9d6e8e16..9a88ca174df 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -1032,6 +1032,13 @@ impl_runtime_apis! 
{ dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 50dfbffde01..5d427d85004 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-r43aesjn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 24_540_000 picoseconds. - Weight::from_parts(25_439_000, 0) + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -90,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 86_614_000 picoseconds. - Weight::from_parts(88_884_000, 0) + // Minimum execution time: 93_243_000 picoseconds. + Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -126,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 87_915_000 picoseconds. - Weight::from_parts(90_219_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_872_000 picoseconds. - Weight::from_parts(7_110_000, 0) + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_009_000 picoseconds. - Weight::from_parts(2_163_000, 0) + // Minimum execution time: 1_833_000 picoseconds. 
+ Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_858_000 picoseconds. - Weight::from_parts(29_355_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 30_598_000 picoseconds. - Weight::from_parts(31_168_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +224,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_090_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_133_000 picoseconds. - Weight::from_parts(16_433_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `159` + // Estimated: `13524` + // Minimum execution time: 18_348_000 picoseconds. + Weight::from_parts(18_853_000, 0) + .saturating_add(Weight::from_parts(0, 13524)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_012_000 picoseconds. - Weight::from_parts(16_449_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `163` + // Estimated: `13528` + // Minimum execution time: 17_964_000 picoseconds. + Weight::from_parts(18_548_000, 0) + .saturating_add(Weight::from_parts(0, 13528)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 17_922_000 picoseconds. 
- Weight::from_parts(18_426_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16013` + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) + .saturating_add(Weight::from_parts(0, 16013)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +282,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 27_280_000 picoseconds. - Weight::from_parts(28_026_000, 0) + // Minimum execution time: 26_632_000 picoseconds. + Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_387_000 picoseconds. - Weight::from_parts(9_644_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11096` + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) + .saturating_add(Weight::from_parts(0, 11096)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 16_649_000 picoseconds. - Weight::from_parts(17_025_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `170` + // Estimated: `13535` + // Minimum execution time: 18_599_000 picoseconds. + Weight::from_parts(19_195_000, 0) + .saturating_add(Weight::from_parts(0, 13535)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -327,12 +327,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_355_000 picoseconds. - Weight::from_parts(35_295_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `212` + // Estimated: `13577` + // Minimum execution time: 35_524_000 picoseconds. 
+ Weight::from_parts(36_272_000, 0) + .saturating_add(Weight::from_parts(0, 13577)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_527_000 picoseconds. - Weight::from_parts(4_699_000, 0) + // Minimum execution time: 4_044_000 picoseconds. + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,10 +355,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 27_011_000 picoseconds. - Weight::from_parts(27_398_000, 0) + // Minimum execution time: 25_741_000 picoseconds. + Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 35_925_000 picoseconds. + Weight::from_parts(36_978_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index c4df5b9a565..c73d2225ebd 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -764,6 +764,13 @@ impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(EXISTENTIAL_DEPOSIT), + } + } } let whitelist: Vec = vec![ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 0cdfebce742..d95e68d85c4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -740,6 +740,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index 0e34cba4aaf..c5d315467c1 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-rococo-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 22_669_000 picoseconds. - Weight::from_parts(23_227_000, 0) + // Minimum execution time: 35_051_000 picoseconds. + Weight::from_parts(35_200_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 64_486_000 picoseconds. - Weight::from_parts(65_247_000, 0) + // Minimum execution time: 56_235_000 picoseconds. + Weight::from_parts(58_178_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_020_000 picoseconds. - Weight::from_parts(7_300_000, 0) + // Minimum execution time: 6_226_000 picoseconds. + Weight::from_parts(6_403_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_022_000 picoseconds. - Weight::from_parts(2_141_000, 0) + // Minimum execution time: 2_020_000 picoseconds. + Weight::from_parts(2_100_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 26_893_000 picoseconds. - Weight::from_parts(27_497_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(24_814_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 29_673_000 picoseconds. - Weight::from_parts(30_693_000, 0) + // Minimum execution time: 27_039_000 picoseconds. 
+ Weight::from_parts(27_693_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,45 +198,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_990_000 picoseconds. - Weight::from_parts(2_105_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(2_082_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_819_000 picoseconds. - Weight::from_parts(15_180_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 17_141_000 picoseconds. + Weight::from_parts(17_500_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_935_000 picoseconds. - Weight::from_parts(15_335_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 17_074_000 picoseconds. + Weight::from_parts(17_431_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_278_000 picoseconds. - Weight::from_parts(16_553_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 19_139_000 picoseconds. + Weight::from_parts(19_474_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -256,36 +254,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 26_360_000 picoseconds. - Weight::from_parts(26_868_000, 0) + // Minimum execution time: 24_346_000 picoseconds. 
+ Weight::from_parts(25_318_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_615_000 picoseconds. - Weight::from_parts(8_903_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_777_000 picoseconds. + Weight::from_parts(12_051_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_284_000 picoseconds. - Weight::from_parts(15_504_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_538_000 picoseconds. + Weight::from_parts(17_832_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -299,12 +297,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `11038` - // Minimum execution time: 32_675_000 picoseconds. - Weight::from_parts(33_816_000, 0) - .saturating_add(Weight::from_parts(0, 11038)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `142` + // Estimated: `13507` + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_186_000, 0) + .saturating_add(Weight::from_parts(0, 13507)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 4_058_000 picoseconds. - Weight::from_parts(4_170_000, 0) + // Minimum execution time: 3_363_000 picoseconds. 
+ Weight::from_parts(3_511_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_375_000 picoseconds. - Weight::from_parts(26_026_000, 0) + // Minimum execution time: 23_969_000 picoseconds. + Weight::from_parts(24_347_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_071_000 picoseconds. + Weight::from_parts(35_031_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 7797329b526..4b9b180321d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -731,6 +731,13 @@ impl_runtime_apis! { // Reserve transfers are disabled None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index d821a581b0d..0082db3099d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,23 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=coretime-westend-dev // --header=./cumulus/file_header.txt // --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ @@ -64,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_946_000 picoseconds. - Weight::from_parts(18_398_000, 0) + // Minimum execution time: 18_410_000 picoseconds. 
+ Weight::from_parts(18_657_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -86,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 47_982_000 picoseconds. - Weight::from_parts(49_215_000, 0) + // Minimum execution time: 56_616_000 picoseconds. + Weight::from_parts(57_751_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -128,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_042_000 picoseconds. - Weight::from_parts(6_257_000, 0) + // Minimum execution time: 6_014_000 picoseconds. + Weight::from_parts(6_412_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -139,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_993_000, 0) + // Minimum execution time: 1_844_000 picoseconds. + Weight::from_parts(1_957_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -164,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_062_000 picoseconds. - Weight::from_parts(24_666_000, 0) + // Minimum execution time: 24_067_000 picoseconds. + Weight::from_parts(24_553_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -188,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 26_486_000 picoseconds. - Weight::from_parts(27_528_000, 0) + // Minimum execution time: 27_023_000 picoseconds. + Weight::from_parts(27_620_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -200,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_881_000 picoseconds. - Weight::from_parts(2_008_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_984_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -211,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_455_000, 0) + // Minimum execution time: 16_425_000 picoseconds. + Weight::from_parts(16_680_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -223,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_603_000 picoseconds. - Weight::from_parts(17_037_000, 0) + // Minimum execution time: 16_171_000 picoseconds. 
+ Weight::from_parts(16_564_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -235,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_821_000 picoseconds. - Weight::from_parts(18_200_000, 0) + // Minimum execution time: 17_785_000 picoseconds. + Weight::from_parts(18_123_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -256,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_878_000 picoseconds. - Weight::from_parts(24_721_000, 0) + // Minimum execution time: 23_903_000 picoseconds. + Weight::from_parts(24_769_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -268,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_566_000 picoseconds. - Weight::from_parts(11_053_000, 0) + // Minimum execution time: 10_617_000 picoseconds. + Weight::from_parts(10_843_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -279,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_020_000 picoseconds. - Weight::from_parts(16_619_000, 0) + // Minimum execution time: 16_656_000 picoseconds. + Weight::from_parts(17_106_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -301,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 32_136_000 picoseconds. - Weight::from_parts(32_610_000, 0) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_547_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -315,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_336_000 picoseconds. - Weight::from_parts(3_434_000, 0) + // Minimum execution time: 3_439_000 picoseconds. + Weight::from_parts(3_619_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -327,10 +325,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_977_000 picoseconds. - Weight::from_parts(24_413_000, 0) + // Minimum execution time: 24_657_000 picoseconds. 
+ Weight::from_parts(24_971_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_028_000 picoseconds. + Weight::from_parts(34_697_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 616da5519a5..28b8062e0e0 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -710,6 +710,13 @@ impl_runtime_apis! { fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index 0f793524de9..fabce29b5fd 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-kusama-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_931_000 picoseconds. - Weight::from_parts(26_340_000, 0) + // Minimum execution time: 17_830_000 picoseconds. + Weight::from_parts(18_411_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_691_000 picoseconds. 
- Weight::from_parts(25_971_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -108,18 +80,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 55_456_000 picoseconds. + Weight::from_parts(56_808_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` - // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -128,189 +120,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_924_000, 0) + // Minimum execution time: 5_996_000 picoseconds. + Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_997_000 picoseconds. - Weight::from_parts(3_136_000, 0) + // Minimum execution time: 1_768_000 picoseconds. 
+ Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_271_000 picoseconds. - Weight::from_parts(30_819_000, 0) + // Minimum execution time: 24_120_000 picoseconds. 
+ Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_302_000 picoseconds. - Weight::from_parts(32_807_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_960_000 picoseconds. - Weight::from_parts(3_094_000, 0) + // Minimum execution time: 1_821_000 picoseconds. 
+ Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_877_000 picoseconds. - Weight::from_parts(15_296_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_835_000 picoseconds. - Weight::from_parts(15_115_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_368_000 picoseconds. - Weight::from_parts(15_596_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_596_000 picoseconds. 
+ Weight::from_parts(18_823_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_025_000 picoseconds. - Weight::from_parts(28_524_000, 0) + // Minimum execution time: 23_817_000 picoseconds. + Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_166_000 picoseconds. - Weight::from_parts(8_314_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_042_000 picoseconds. 
+ Weight::from_parts(11_578_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_871_000 picoseconds. - Weight::from_parts(15_374_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_611_000 picoseconds. - Weight::from_parts(34_008_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 32_141_000 picoseconds. 
+ Weight::from_parts(32_954_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -319,11 +311,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn new_query() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 3_410_000 picoseconds. + Weight::from_parts(3_556_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -331,11 +323,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 33_801_000 picoseconds. + Weight::from_parts(34_655_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 9f572330cac..b2b32445613 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -710,6 +710,13 @@ impl_runtime_apis! { fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { None } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } } use xcm::latest::prelude::*; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index d3b60471b85..c337289243b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=people-polkadot-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/pallet_xcm.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,57 +48,28 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_783_000 picoseconds. - Weight::from_parts(26_398_000, 0) + // Minimum execution time: 17_856_000 picoseconds. + Weight::from_parts(18_473_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 25_511_000 picoseconds. - Weight::from_parts(26_120_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
- Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -108,18 +80,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 56_112_000 picoseconds. + Weight::from_parts(57_287_000, 0) + .saturating_add(Weight::from_parts(0, 3535)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` - // Estimated: `6208` - // Minimum execution time: 146_932_000 picoseconds. - Weight::from_parts(153_200_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -128,189 +120,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: PolkadotXcm SupportedVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(9_874_000, 0) + // Minimum execution time: 6_186_000 picoseconds. + Weight::from_parts(6_420_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SafeXcmVersion (r:0 w:1) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_default_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_073_000 picoseconds. - Weight::from_parts(3_183_000, 0) + // Minimum execution time: 1_824_000 picoseconds. 
+ Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm QueryCounter (r:1 w:1) - /// Proof Skipped: PolkadotXcm QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_999_000 picoseconds. - Weight::from_parts(31_641_000, 0) + // Minimum execution time: 23_833_000 picoseconds. 
+ Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: PolkadotXcm VersionNotifiers (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm Queries (r:0 w:1) - /// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 33_036_000 picoseconds. - Weight::from_parts(33_596_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: PolkadotXcm XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: PolkadotXcm XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_035_000 picoseconds. - Weight::from_parts(3_154_000, 0) + // Minimum execution time: 1_921_000 picoseconds. 
+ Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: PolkadotXcm SupportedVersion (r:4 w:2) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_805_000 picoseconds. - Weight::from_parts(15_120_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `89` + // Estimated: `13454` + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) + .saturating_add(Weight::from_parts(0, 13454)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifiers (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_572_000 picoseconds. - Weight::from_parts(14_909_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `93` + // Estimated: `13458` + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) + .saturating_add(Weight::from_parts(0, 13458)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_341_000 picoseconds. - Weight::from_parts(15_708_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15946` + // Minimum execution time: 18_164_000 picoseconds. 
+ Weight::from_parts(18_580_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(6)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 27_840_000 picoseconds. - Weight::from_parts(28_248_000, 0) + // Minimum execution time: 23_577_000 picoseconds. + Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_245_000 picoseconds. - Weight::from_parts(8_523_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `11026` + // Minimum execution time: 11_014_000 picoseconds. 
+ Weight::from_parts(11_223_000, 0) + .saturating_add(Weight::from_parts(0, 11026)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_780_000 picoseconds. - Weight::from_parts(15_173_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `100` + // Estimated: `13465` + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) + .saturating_add(Weight::from_parts(0, 13465)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: PolkadotXcm VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem HostConfiguration (r:1 w:0) - /// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - /// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 33_422_000 picoseconds. - Weight::from_parts(34_076_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `106` + // Estimated: `13471` + // Minimum execution time: 31_705_000 picoseconds. 
+ Weight::from_parts(32_166_000, 0) + .saturating_add(Weight::from_parts(0, 13471)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -319,11 +311,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn new_query() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 5_496_000 picoseconds. - Weight::from_parts(5_652_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 3_568_000 picoseconds. + Weight::from_parts(3_669_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -331,11 +323,23 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn take_response() -> Weight { // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_140_000 picoseconds. - Weight::from_parts(26_824_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 24_823_000 picoseconds. + Weight::from_parts(25_344_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 34_516_000 picoseconds. + Weight::from_parts(35_478_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index f3c0c3d6bd8..48395f45238 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2336,6 +2336,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 177407ef708..5544ca44658 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,10 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -62,36 +58,80 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 27_328_000 picoseconds. - Weight::from_parts(27_976_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_682_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 16_280_000 picoseconds. - Weight::from_parts(16_904_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 107_570_000 picoseconds. + Weight::from_parts(109_878_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 15_869_000 picoseconds. 
- Weight::from_parts(16_264_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `232` + // Estimated: `3697` + // Minimum execution time: 106_341_000 picoseconds. + Weight::from_parts(109_135_000, 0) + .saturating_add(Weight::from_parts(0, 3697)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 108_372_000 picoseconds. + Weight::from_parts(112_890_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_923_000 picoseconds. - Weight::from_parts(7_432_000, 0) + // Minimum execution time: 6_957_000 picoseconds. + Weight::from_parts(7_417_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -100,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_333_000 picoseconds. - Weight::from_parts(7_566_000, 0) + // Minimum execution time: 7_053_000 picoseconds. + Weight::from_parts(7_462_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -109,8 +149,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_219_000 picoseconds. - Weight::from_parts(2_375_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_037_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -129,11 +169,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 30_650_000 picoseconds. - Weight::from_parts(31_683_000, 0) - .saturating_add(Weight::from_parts(0, 3607)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 30_417_000 picoseconds. 
+ Weight::from_parts(31_191_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -151,11 +191,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 37_666_000 picoseconds. - Weight::from_parts(38_920_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `360` + // Estimated: `3825` + // Minimum execution time: 36_666_000 picoseconds. + Weight::from_parts(37_779_000, 0) + .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -165,45 +205,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_244_000 picoseconds. - Weight::from_parts(2_425_000, 0) + // Minimum execution time: 1_869_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_710_000 picoseconds. - Weight::from_parts(15_156_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_188_000 picoseconds. + Weight::from_parts(16_435_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_630_000 picoseconds. - Weight::from_parts(15_290_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_431_000 picoseconds. + Weight::from_parts(16_935_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_686_000 picoseconds. - Weight::from_parts(17_332_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 18_460_000 picoseconds. 
+ Weight::from_parts(18_885_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -217,38 +257,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_180_000 picoseconds. - Weight::from_parts(31_351_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) + // Measured: `216` + // Estimated: `6156` + // Minimum execution time: 29_623_000 picoseconds. + Weight::from_parts(30_661_000, 0) + .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_624_000 picoseconds. - Weight::from_parts(10_029_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 12_043_000 picoseconds. + Weight::from_parts(12_360_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_139_000 picoseconds. - Weight::from_parts(15_575_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 16_511_000 picoseconds. + Weight::from_parts(17_011_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -260,12 +300,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_871_000 picoseconds. - Weight::from_parts(38_940_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `216` + // Estimated: `13581` + // Minimum execution time: 39_041_000 picoseconds. 
+ Weight::from_parts(39_883_000, 0) + .saturating_add(Weight::from_parts(0, 13581)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -276,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_732_000 picoseconds. - Weight::from_parts(2_892_000, 0) + // Minimum execution time: 2_030_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -288,10 +328,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 23_813_000 picoseconds. - Weight::from_parts(24_201_000, 0) + // Minimum execution time: 22_615_000 picoseconds. + Weight::from_parts(23_008_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_438_000 picoseconds. + Weight::from_parts(35_514_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a7772303a95..4ac2202b8fc 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2412,6 +2412,13 @@ sp_api::impl_runtime_apis! { dest ) } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 493acd0f9e7..10725cecf24 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -16,26 +16,24 @@ //! Autogenerated weights for `pallet_xcm` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_xcm // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=westend-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/westend/src/weights/ @@ -50,10 +48,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - fn transfer_assets() -> Weight { - // TODO: run benchmarks - Weight::zero() - } /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) @@ -64,29 +58,73 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 28_098_000 picoseconds. - Weight::from_parts(28_887_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_609_000 picoseconds. - Weight::from_parts(18_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 113_140_000 picoseconds. 
+ Weight::from_parts(116_204_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 17_007_000 picoseconds. - Weight::from_parts(17_471_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `6196` + // Minimum execution time: 108_571_000 picoseconds. + Weight::from_parts(110_650_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 111_836_000 picoseconds. + Weight::from_parts(114_435_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -104,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_444_000 picoseconds. - Weight::from_parts(7_671_000, 0) + // Minimum execution time: 7_160_000 picoseconds. + Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -113,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_126_000 picoseconds. - Weight::from_parts(2_253_000, 0) + // Minimum execution time: 1_934_000 picoseconds. 
+ Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -133,11 +171,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 31_318_000 picoseconds. - Weight::from_parts(32_413_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 31_123_000 picoseconds. + Weight::from_parts(31_798_000, 0) + .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -155,11 +193,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `289` - // Estimated: `3754` - // Minimum execution time: 35_282_000 picoseconds. - Weight::from_parts(35_969_000, 0) - .saturating_add(Weight::from_parts(0, 3754)) + // Measured: `327` + // Estimated: `3792` + // Minimum execution time: 35_175_000 picoseconds. + Weight::from_parts(36_098_000, 0) + .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -169,45 +207,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_247_000 picoseconds. - Weight::from_parts(2_381_000, 0) + // Minimum execution time: 1_974_000 picoseconds. + Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `26` - // Estimated: `10916` - // Minimum execution time: 14_512_000 picoseconds. - Weight::from_parts(15_042_000, 0) - .saturating_add(Weight::from_parts(0, 10916)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `22` + // Estimated: `13387` + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) + .saturating_add(Weight::from_parts(0, 13387)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `10920` - // Minimum execution time: 14_659_000 picoseconds. - Weight::from_parts(15_164_000, 0) - .saturating_add(Weight::from_parts(0, 10920)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `26` + // Estimated: `13391` + // Minimum execution time: 16_937_000 picoseconds. 
+ Weight::from_parts(17_447_000, 0) + .saturating_add(Weight::from_parts(0, 13391)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `13405` - // Minimum execution time: 16_261_000 picoseconds. - Weight::from_parts(16_986_000, 0) - .saturating_add(Weight::from_parts(0, 13405)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15880` + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) + .saturating_add(Weight::from_parts(0, 15880)) + .saturating_add(T::DbWeight::get().reads(6)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -221,38 +259,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 30_539_000 picoseconds. - Weight::from_parts(31_117_000, 0) - .saturating_add(Weight::from_parts(0, 6085)) + // Measured: `183` + // Estimated: `6123` + // Minimum execution time: 30_699_000 picoseconds. + Weight::from_parts(31_537_000, 0) + .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: // Measured: `69` - // Estimated: `8484` - // Minimum execution time: 9_463_000 picoseconds. - Weight::from_parts(9_728_000, 0) - .saturating_add(Weight::from_parts(0, 8484)) - .saturating_add(T::DbWeight::get().reads(3)) + // Estimated: `10959` + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_670_000, 0) + .saturating_add(Weight::from_parts(0, 10959)) + .saturating_add(T::DbWeight::get().reads(4)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `37` - // Estimated: `10927` - // Minimum execution time: 15_169_000 picoseconds. - Weight::from_parts(15_694_000, 0) - .saturating_add(Weight::from_parts(0, 10927)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `33` + // Estimated: `13398` + // Minimum execution time: 17_129_000 picoseconds. 
+ Weight::from_parts(17_668_000, 0) + .saturating_add(Weight::from_parts(0, 13398)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -264,12 +302,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `149` - // Estimated: `11039` - // Minimum execution time: 37_549_000 picoseconds. - Weight::from_parts(38_203_000, 0) - .saturating_add(Weight::from_parts(0, 11039)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `183` + // Estimated: `13548` + // Minimum execution time: 39_960_000 picoseconds. + Weight::from_parts(41_068_000, 0) + .saturating_add(Weight::from_parts(0, 13548)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) @@ -280,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_947_000 picoseconds. - Weight::from_parts(3_117_000, 0) + // Minimum execution time: 2_333_000 picoseconds. + Weight::from_parts(2_504_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -292,10 +330,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 24_595_000 picoseconds. - Weight::from_parts(24_907_000, 0) + // Minimum execution time: 22_932_000 picoseconds. + Weight::from_parts(23_307_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 34_558_000 picoseconds. + Weight::from_parts(35_299_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index e3ea2fb8c06..ed42f93692b 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -79,6 +79,13 @@ pub trait Config: crate::Config { fn set_up_complex_asset_transfer() -> Option<(Assets, u32, Location, Box)> { None } + + /// Gets an asset that can be handled by the AssetTransactor. + /// + /// Used only in benchmarks. + /// + /// Used, for example, in the benchmark for `claim_assets`. + fn get_asset() -> Asset; } benchmarks! { @@ -341,11 +348,23 @@ benchmarks! 
{ u32::MAX, ).unwrap()).collect::>(); crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); - }: { as QueryHandler>::take_response(query_id); } + claim_assets { + let claim_origin = RawOrigin::Signed(whitelisted_caller()); + let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()).map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let asset: Asset = T::get_asset(); + // Trap assets for claiming later + crate::Pallet::::drop_assets( + &claim_location, + asset.clone().into(), + &XcmContext { origin: None, message_id: [0u8; 32], topic: None } + ); + let versioned_assets = VersionedAssets::V4(asset.into()); + }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::V4(claim_location))) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext_with_balances(Vec::new()), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5e1a3e55f9b..1a1f8b402a3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -85,6 +85,7 @@ pub trait WeightInfo { fn migrate_and_notify_old_targets() -> Weight; fn new_query() -> Weight; fn take_response() -> Weight; + fn claim_assets() -> Weight; } /// fallback implementation @@ -165,6 +166,10 @@ impl WeightInfo for TestWeightInfo { fn take_response() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn claim_assets() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -1386,6 +1391,64 @@ pub mod pallet { weight_limit, ) } + + /// Claims assets trapped on this pallet because of leftover assets during XCM execution. + /// + /// - `origin`: Anyone can call this extrinsic. + /// - `assets`: The exact assets that were trapped. Use the version to specify what version + /// was the latest when they were trapped. + /// - `beneficiary`: The location/account where the claimed assets will be deposited. + #[pallet::call_index(12)] + #[pallet::weight({ + let assets_version = assets.identify_version(); + let maybe_assets: Result = (*assets.clone()).try_into(); + let maybe_beneficiary: Result = (*beneficiary.clone()).try_into(); + match (maybe_assets, maybe_beneficiary) { + (Ok(assets), Ok(beneficiary)) => { + let ticket: Location = GeneralIndex(assets_version as u128).into(); + let mut message = Xcm(vec![ + ClaimAsset { assets: assets.clone(), ticket }, + DepositAsset { assets: AllCounted(assets.len() as u32).into(), beneficiary }, + ]); + T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::claim_assets().saturating_add(w)) + } + _ => Weight::MAX + } + })] + pub fn claim_assets( + origin: OriginFor, + assets: Box, + beneficiary: Box, + ) -> DispatchResult { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + log::debug!(target: "xcm::pallet_xcm::claim_assets", "origin: {:?}, assets: {:?}, beneficiary: {:?}", origin_location, assets, beneficiary); + // Extract version from `assets`. 
+			let assets_version = assets.identify_version();
+			let assets: Assets = (*assets).try_into().map_err(|()| Error::<T>::BadVersion)?;
+			let number_of_assets = assets.len() as u32;
+			let beneficiary: Location =
+				(*beneficiary).try_into().map_err(|()| Error::<T>::BadVersion)?;
+			let ticket: Location = GeneralIndex(assets_version as u128).into();
+			let mut message = Xcm(vec![
+				ClaimAsset { assets, ticket },
+				DepositAsset { assets: AllCounted(number_of_assets).into(), beneficiary },
+			]);
+			let weight =
+				T::Weigher::weight(&mut message).map_err(|()| Error::<T>::UnweighableMessage)?;
+			let mut hash = message.using_encoded(sp_io::hashing::blake2_256);
+			let outcome = T::XcmExecutor::prepare_and_execute(
+				origin_location,
+				message,
+				&mut hash,
+				weight,
+				weight,
+			);
+			outcome.ensure_complete().map_err(|error| {
+				log::error!(target: "xcm::pallet_xcm::claim_assets", "XCM execution failed with error: {:?}", error);
+				Error::<T>::LocalExecutionIncomplete
+			})?;
+			Ok(())
+		}
 	}
 }
diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs
index 17c181387dd..d5ba7312a3a 100644
--- a/polkadot/xcm/pallet-xcm/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm/src/mock.rs
@@ -172,7 +172,7 @@ impl SendXcm for TestSendXcm {
 		msg: &mut Option<Xcm<()>>,
 	) -> SendResult<(Location, Xcm<()>)> {
 		if FAIL_SEND_XCM.with(|q| *q.borrow()) {
-			return Err(SendError::Transport("Intentional send failure used in tests"))
+			return Err(SendError::Transport("Intentional send failure used in tests"));
 		}
 		let pair = (dest.take().unwrap(), msg.take().unwrap());
 		Ok((pair, Assets::new()))
@@ -654,6 +654,10 @@ impl super::benchmarking::Config for Test {
 		});
 		Some((assets, fee_index as u32, dest, verify))
 	}
+
+	fn get_asset() -> Asset {
+		Asset { id: AssetId(Location::here()), fun: Fungible(ExistentialDeposit::get()) }
+	}
 }
 
 pub(crate) fn last_event() -> RuntimeEvent {
diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
index 28c7d197443..13022d9a8b1 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
@@ -467,6 +467,57 @@ fn trapped_assets_can_be_claimed() {
 	});
 }
 
+// Like `trapped_assets_can_be_claimed` but using the `claim_assets` extrinsic.
+#[test]
+fn claim_assets_works() {
+	let balances = vec![(ALICE, INITIAL_BALANCE)];
+	new_test_ext_with_balances(balances).execute_with(|| {
+		// First trap some assets.
+		let trapping_program =
+			Xcm::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT).into()).build();
+		// Even though assets are trapped, the extrinsic returns success.
+		assert_ok!(XcmPallet::execute(
+			RuntimeOrigin::signed(ALICE),
+			Box::new(VersionedXcm::V4(trapping_program)),
+			BaseXcmWeight::get() * 2,
+		));
+		assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT);
+
+		// Expected `AssetsTrapped` event info.
+		let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into();
+		let versioned_assets = VersionedAssets::V4(Assets::from((Here, SEND_AMOUNT)));
+		let hash = BlakeTwo256::hash_of(&(source.clone(), versioned_assets.clone()));
+
+		// Assets were indeed trapped.
+		assert_eq!(
+			last_events(2),
+			vec![
+				RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped {
+					hash,
+					origin: source,
+					assets: versioned_assets
+				}),
+				RuntimeEvent::XcmPallet(crate::Event::Attempted {
+					outcome: Outcome::Complete { used: BaseXcmWeight::get() * 1 }
+				})
+			],
+		);
+		let trapped = AssetTraps::<Test>::iter().collect::<Vec<_>>();
+		assert_eq!(trapped, vec![(hash, 1)]);
+
+		// Now claim them with the extrinsic.
+ assert_ok!(XcmPallet::claim_assets( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())), + Box::new(VersionedLocation::V4( + AccountId32 { network: None, id: ALICE.clone().into() }.into() + )), + )); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_eq!(AssetTraps::::iter().collect::>(), vec![]); + }); +} + /// Test failure to complete execution reverts intermediate side-effects. /// /// XCM program will withdraw and deposit some assets, then fail execution of a further withdraw. diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index c90ad25c0d8..ba8d726aecf 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -165,6 +165,15 @@ macro_rules! versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; ($(#[$attr:meta])* pub enum $n:ident { @@ -287,6 +296,16 @@ macro_rules! versioned_type { <$v3>::max_encoded_len() } } + impl IdentifyVersion for $n { + fn identify_version(&self) -> Version { + use $n::*; + match self { + V2(_) => v2::VERSION, + V3(_) => v3::VERSION, + V4(_) => v4::VERSION, + } + } + } }; } @@ -493,6 +512,12 @@ pub trait WrapVersion { ) -> Result, ()>; } +/// Used to get the version out of a versioned type. +// TODO(XCMv5): This could be `GetVersion` and we change the current one to `GetVersionFor`. +pub trait IdentifyVersion { + fn identify_version(&self) -> Version; +} + /// Check and return the `Version` that should be used for the `Xcm` datum for the destination /// `Location`, which will interpret it. pub trait GetVersion { @@ -572,9 +597,9 @@ pub type AlwaysLts = AlwaysV4; pub mod prelude { pub use super::{ latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, AlwaysV4, GetVersion, - IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, VersionedAssetId, - VersionedAssets, VersionedInteriorLocation, VersionedLocation, VersionedResponse, - VersionedXcm, WrapVersion, + IdentifyVersion, IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, + VersionedAssetId, VersionedAssets, VersionedInteriorLocation, VersionedLocation, + VersionedResponse, VersionedXcm, WrapVersion, }; } diff --git a/prdoc/pr_3403.prdoc b/prdoc/pr_3403.prdoc new file mode 100644 index 00000000000..086e769da3c --- /dev/null +++ b/prdoc/pr_3403.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add `claim_assets` extrinsic to `pallet-xcm`" + +doc: + - audience: Runtime User + description: | + There's a new extrinsic in `pallet-xcm` for claiming assets. + This means that if your assets ever get trapped while teleporting or doing reserve asset transfers, + you can easily claim them by calling this new extrinsic. + - audience: Runtime Dev + description: | + There's a new extrinsic in `pallet-xcm` that needs a new configuration item for its benchmarks. + It's a simple function in `pallet_xcm::benchmarking::Config`, `get_asset`, that returns a valid asset + handled by the AssetTransactor of the chain. + If you're already using `pallet-xcm-benchmarks`, then you already have this function there and can + just copy and paste it. 
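For reference, the `get_asset` implementation this note refers to can be as small as the ones this patch adds for the Rococo and Westend runtimes and for the pallet's mock. A minimal sketch, assuming a runtime called `Runtime` whose `AssetTransactor` handles the native token, an `ExistentialDeposit` parameter type in scope, and the XCM v4 prelude for `Asset`, `AssetId`, `Location`, and `Fungible` (all assumptions of this sketch, not requirements of the pallet):

impl pallet_xcm::benchmarking::Config for Runtime {
	// Any asset the runtime's AssetTransactor can deposit works here; the native
	// token at `Here`, worth one existential deposit, is the simplest choice.
	fn get_asset() -> Asset {
		Asset { id: AssetId(Location::here()), fun: Fungible(ExistentialDeposit::get()) }
	}
}

The other methods of `pallet_xcm::benchmarking::Config` keep their default implementations, so only `get_asset` needs to be supplied when the benchmarks are enabled.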
+ +crates: + - name: pallet-xcm + - name: staging-xcm -- GitLab From 59b2661444de2a25f2125a831bd786035a9fac4b Mon Sep 17 00:00:00 2001 From: Egor_P Date: Fri, 1 Mar 2024 15:51:53 +0700 Subject: [PATCH 28/86] [Backport] Node version and spec_version bumps and ordering of the prdoc files from 1.8.0 (#3508) This PR backports Node version and `spec_version` bumps to `1.8.0` from the latest release and orders prdoc files related to it. --- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 4 ++-- .../runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives/collectives-westend/src/lib.rs | 2 +- .../runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- .../runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../runtimes/coretime/coretime-westend/src/lib.rs | 2 +- .../runtimes/glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-rococo/src/lib.rs | 2 +- .../runtimes/people/people-westend/src/lib.rs | 2 +- .../runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- prdoc/{ => 1.8.0}/pr_1660.prdoc | 0 prdoc/{ => 1.8.0}/pr_2061.prdoc | 0 prdoc/{ => 1.8.0}/pr_2290.prdoc | 0 prdoc/{ => 1.8.0}/pr_2903.prdoc | 0 ...9-async-backing-on-all-testnet-system-chains.prdoc | 0 prdoc/{ => 1.8.0}/pr_3007.prdoc | 0 prdoc/{ => 1.8.0}/pr_3052.prdoc | 0 prdoc/{ => 1.8.0}/pr_3060.prdoc | 0 prdoc/{ => 1.8.0}/pr_3079.prdoc | 0 prdoc/{ => 1.8.0}/pr_3154.prdoc | 0 prdoc/{ => 1.8.0}/pr_3160.prdoc | 0 prdoc/{ => 1.8.0}/pr_3166.prdoc | 0 prdoc/{ => 1.8.0}/pr_3184.prdoc | 0 prdoc/{ => 1.8.0}/pr_3212.prdoc | 0 prdoc/{ => 1.8.0}/pr_3225.prdoc | 0 prdoc/{ => 1.8.0}/pr_3230.prdoc | 0 prdoc/{ => 1.8.0}/pr_3232.prdoc | 0 prdoc/{ => 1.8.0}/pr_3243.prdoc | 0 prdoc/{ => 1.8.0}/pr_3244.prdoc | 0 prdoc/{ => 1.8.0}/pr_3272.prdoc | 0 prdoc/{ => 1.8.0}/pr_3301.prdoc | 0 prdoc/{ => 1.8.0}/pr_3308.prdoc | 0 prdoc/{ => 1.8.0}/pr_3319.prdoc | 0 prdoc/{ => 1.8.0}/pr_3325.prdoc | 0 prdoc/{ => 1.8.0}/pr_3358.prdoc | 0 prdoc/{ => 1.8.0}/pr_3361.prdoc | 0 prdoc/{ => 1.8.0}/pr_3364.prdoc | 0 prdoc/{ => 1.8.0}/pr_3370.prdoc | 0 prdoc/{ => 1.8.0}/pr_3384.prdoc | 0 prdoc/{ => 1.8.0}/pr_3395.prdoc | 0 prdoc/{ => 1.8.0}/pr_3415.prdoc | 0 prdoc/{ => 1.8.0}/pr_3435.prdoc | 0 prdoc/1.8.0/pr_3477.prdoc | 11 +++++++++++ 48 files changed, 27 insertions(+), 16 deletions(-) rename prdoc/{ => 1.8.0}/pr_1660.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_2061.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_2290.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_2903.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_2949-async-backing-on-all-testnet-system-chains.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3007.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3052.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3060.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3079.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3154.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3160.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3166.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3184.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3212.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3225.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3230.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3232.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3243.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3244.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3272.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3301.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3308.prdoc 
(100%) rename prdoc/{ => 1.8.0}/pr_3319.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3325.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3358.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3361.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3364.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3370.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3384.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3395.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3415.prdoc (100%) rename prdoc/{ => 1.8.0}/pr_3435.prdoc (100%) create mode 100644 prdoc/1.8.0/pr_3477.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index a64d4bc2365..17a12dd2f6f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 5778ed362ee..1ead2897855 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 14, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ee472f006fa..0b48d1717fa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -202,7 +202,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 60d959b4053..e1344fce63d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -176,7 +176,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 4, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs 
b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 9a88ca174df..6bcf98c428f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -117,7 +117,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index c73d2225ebd..0668b9a4c7d 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index d95e68d85c4..cdff371c505 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 4b9b180321d..7fa70647992 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -133,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index e09ad42ea9f..232a82115a8 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 28b8062e0e0..2b8cc32f67c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { 
spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index b2b32445613..2dc2d06a66b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -126,7 +126,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 0, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index b69c2341d6b..2b21a12c3ca 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 6e3eefbcbe8..d295c21cce1 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.7.0"; +pub const NODE_VERSION: &'static str = "1.8.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 48395f45238..96e8c4e0979 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -149,7 +149,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_007_000, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 4ac2202b8fc..b55d68ee0f6 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_007_001, + spec_version: 1_008_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 24, diff --git a/prdoc/pr_1660.prdoc b/prdoc/1.8.0/pr_1660.prdoc similarity index 100% rename from prdoc/pr_1660.prdoc rename to prdoc/1.8.0/pr_1660.prdoc diff --git a/prdoc/pr_2061.prdoc b/prdoc/1.8.0/pr_2061.prdoc similarity index 100% rename from prdoc/pr_2061.prdoc rename to prdoc/1.8.0/pr_2061.prdoc diff --git a/prdoc/pr_2290.prdoc b/prdoc/1.8.0/pr_2290.prdoc similarity index 100% rename from prdoc/pr_2290.prdoc rename to prdoc/1.8.0/pr_2290.prdoc diff --git a/prdoc/pr_2903.prdoc b/prdoc/1.8.0/pr_2903.prdoc similarity index 100% rename from prdoc/pr_2903.prdoc rename to prdoc/1.8.0/pr_2903.prdoc diff --git a/prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc b/prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc similarity index 100% rename from prdoc/pr_2949-async-backing-on-all-testnet-system-chains.prdoc rename to prdoc/1.8.0/pr_2949-async-backing-on-all-testnet-system-chains.prdoc diff --git a/prdoc/pr_3007.prdoc b/prdoc/1.8.0/pr_3007.prdoc similarity index 100% rename from prdoc/pr_3007.prdoc rename to prdoc/1.8.0/pr_3007.prdoc diff --git a/prdoc/pr_3052.prdoc b/prdoc/1.8.0/pr_3052.prdoc similarity index 100% rename from prdoc/pr_3052.prdoc rename to prdoc/1.8.0/pr_3052.prdoc diff --git a/prdoc/pr_3060.prdoc b/prdoc/1.8.0/pr_3060.prdoc similarity index 100% rename from prdoc/pr_3060.prdoc rename to prdoc/1.8.0/pr_3060.prdoc diff --git a/prdoc/pr_3079.prdoc b/prdoc/1.8.0/pr_3079.prdoc similarity index 100% rename from prdoc/pr_3079.prdoc rename to prdoc/1.8.0/pr_3079.prdoc diff --git a/prdoc/pr_3154.prdoc b/prdoc/1.8.0/pr_3154.prdoc similarity index 100% rename from prdoc/pr_3154.prdoc rename to prdoc/1.8.0/pr_3154.prdoc diff --git a/prdoc/pr_3160.prdoc b/prdoc/1.8.0/pr_3160.prdoc similarity index 100% rename from prdoc/pr_3160.prdoc rename to prdoc/1.8.0/pr_3160.prdoc diff --git a/prdoc/pr_3166.prdoc b/prdoc/1.8.0/pr_3166.prdoc similarity index 100% rename from prdoc/pr_3166.prdoc rename to prdoc/1.8.0/pr_3166.prdoc diff --git a/prdoc/pr_3184.prdoc b/prdoc/1.8.0/pr_3184.prdoc similarity index 100% rename from prdoc/pr_3184.prdoc rename to prdoc/1.8.0/pr_3184.prdoc diff --git a/prdoc/pr_3212.prdoc b/prdoc/1.8.0/pr_3212.prdoc similarity index 100% rename from prdoc/pr_3212.prdoc rename to prdoc/1.8.0/pr_3212.prdoc diff --git 
a/prdoc/pr_3225.prdoc b/prdoc/1.8.0/pr_3225.prdoc similarity index 100% rename from prdoc/pr_3225.prdoc rename to prdoc/1.8.0/pr_3225.prdoc diff --git a/prdoc/pr_3230.prdoc b/prdoc/1.8.0/pr_3230.prdoc similarity index 100% rename from prdoc/pr_3230.prdoc rename to prdoc/1.8.0/pr_3230.prdoc diff --git a/prdoc/pr_3232.prdoc b/prdoc/1.8.0/pr_3232.prdoc similarity index 100% rename from prdoc/pr_3232.prdoc rename to prdoc/1.8.0/pr_3232.prdoc diff --git a/prdoc/pr_3243.prdoc b/prdoc/1.8.0/pr_3243.prdoc similarity index 100% rename from prdoc/pr_3243.prdoc rename to prdoc/1.8.0/pr_3243.prdoc diff --git a/prdoc/pr_3244.prdoc b/prdoc/1.8.0/pr_3244.prdoc similarity index 100% rename from prdoc/pr_3244.prdoc rename to prdoc/1.8.0/pr_3244.prdoc diff --git a/prdoc/pr_3272.prdoc b/prdoc/1.8.0/pr_3272.prdoc similarity index 100% rename from prdoc/pr_3272.prdoc rename to prdoc/1.8.0/pr_3272.prdoc diff --git a/prdoc/pr_3301.prdoc b/prdoc/1.8.0/pr_3301.prdoc similarity index 100% rename from prdoc/pr_3301.prdoc rename to prdoc/1.8.0/pr_3301.prdoc diff --git a/prdoc/pr_3308.prdoc b/prdoc/1.8.0/pr_3308.prdoc similarity index 100% rename from prdoc/pr_3308.prdoc rename to prdoc/1.8.0/pr_3308.prdoc diff --git a/prdoc/pr_3319.prdoc b/prdoc/1.8.0/pr_3319.prdoc similarity index 100% rename from prdoc/pr_3319.prdoc rename to prdoc/1.8.0/pr_3319.prdoc diff --git a/prdoc/pr_3325.prdoc b/prdoc/1.8.0/pr_3325.prdoc similarity index 100% rename from prdoc/pr_3325.prdoc rename to prdoc/1.8.0/pr_3325.prdoc diff --git a/prdoc/pr_3358.prdoc b/prdoc/1.8.0/pr_3358.prdoc similarity index 100% rename from prdoc/pr_3358.prdoc rename to prdoc/1.8.0/pr_3358.prdoc diff --git a/prdoc/pr_3361.prdoc b/prdoc/1.8.0/pr_3361.prdoc similarity index 100% rename from prdoc/pr_3361.prdoc rename to prdoc/1.8.0/pr_3361.prdoc diff --git a/prdoc/pr_3364.prdoc b/prdoc/1.8.0/pr_3364.prdoc similarity index 100% rename from prdoc/pr_3364.prdoc rename to prdoc/1.8.0/pr_3364.prdoc diff --git a/prdoc/pr_3370.prdoc b/prdoc/1.8.0/pr_3370.prdoc similarity index 100% rename from prdoc/pr_3370.prdoc rename to prdoc/1.8.0/pr_3370.prdoc diff --git a/prdoc/pr_3384.prdoc b/prdoc/1.8.0/pr_3384.prdoc similarity index 100% rename from prdoc/pr_3384.prdoc rename to prdoc/1.8.0/pr_3384.prdoc diff --git a/prdoc/pr_3395.prdoc b/prdoc/1.8.0/pr_3395.prdoc similarity index 100% rename from prdoc/pr_3395.prdoc rename to prdoc/1.8.0/pr_3395.prdoc diff --git a/prdoc/pr_3415.prdoc b/prdoc/1.8.0/pr_3415.prdoc similarity index 100% rename from prdoc/pr_3415.prdoc rename to prdoc/1.8.0/pr_3415.prdoc diff --git a/prdoc/pr_3435.prdoc b/prdoc/1.8.0/pr_3435.prdoc similarity index 100% rename from prdoc/pr_3435.prdoc rename to prdoc/1.8.0/pr_3435.prdoc diff --git a/prdoc/1.8.0/pr_3477.prdoc b/prdoc/1.8.0/pr_3477.prdoc new file mode 100644 index 00000000000..26e96d3635b --- /dev/null +++ b/prdoc/1.8.0/pr_3477.prdoc @@ -0,0 +1,11 @@ +title: Allow parachain which acquires multiple coretime cores to make progress + +doc: + - audience: Node Operator + description: | + Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress. + Only one of the cores will be able to be occupied at a time. + Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is + updated to include this change. 
+ +crates: [ ] -- GitLab From a1b57a8c181bc39b254ff4d6d774f6ff348e509d Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 1 Mar 2024 13:19:17 +0200 Subject: [PATCH 29/86] Fix crash of synced parachain node run with `--sync=warp` (#3523) Fixes https://github.com/paritytech/polkadot-sdk/issues/3496. --- prdoc/pr_3523.prdoc | 19 ++++ substrate/client/network/sync/src/engine.rs | 2 + substrate/client/network/sync/src/strategy.rs | 88 +++++++++++++++++-- 3 files changed, 101 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_3523.prdoc diff --git a/prdoc/pr_3523.prdoc b/prdoc/pr_3523.prdoc new file mode 100644 index 00000000000..c31d9d096dc --- /dev/null +++ b/prdoc/pr_3523.prdoc @@ -0,0 +1,19 @@ +title: Fix crash of synced parachain node run with `--sync=warp` + +doc: + - audience: Node Operator + description: | + Fix crash of `SyncingEngine` when an already synced parachain node is run with `--sync=warp` + (issue https://github.com/paritytech/polkadot-sdk/issues/3496). + The issue manifests itself by errors in the logs: + ``` + [Parachain] Cannot set warp sync target block: no warp sync strategy is active. + [Parachain] Failed to set warp sync target block header, terminating `SyncingEngine`. + ``` + Followed by a stream of messages: + ``` + [Parachain] Protocol command streams have been shut down + ``` + +crates: + - name: sc-network-sync diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index c1a7009eeb0..24640fdc455 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -656,6 +656,8 @@ where Some(event) => self.process_notification_event(event), None => return, }, + // TODO: setting of warp sync target block should be moved to the initialization of + // `SyncingEngine`, see https://github.com/paritytech/polkadot-sdk/issues/3537. warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => { if let Err(_) = self.pass_warp_sync_target_block_header(warp_target_block_header) { error!( diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 7d6e6a8d3b8..dabcf37ae63 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -30,7 +30,7 @@ use crate::{ }; use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode}; use libp2p::PeerId; -use log::{debug, error, info}; +use log::{debug, error, info, warn}; use prometheus_endpoint::Registry; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; @@ -159,7 +159,7 @@ impl From> for SyncingAction { /// Proxy to specific syncing strategies. pub struct SyncingStrategy { - /// Syncing configuration. + /// Initial syncing configuration. config: SyncingConfig, /// Client used by syncing strategies. client: Arc, @@ -466,15 +466,27 @@ where &mut self, target_header: B::Header, ) -> Result<(), ()> { - match self.warp { - Some(ref mut warp) => { - warp.set_target_block(target_header); - Ok(()) + match self.config.mode { + SyncMode::Warp => match self.warp { + Some(ref mut warp) => { + warp.set_target_block(target_header); + Ok(()) + }, + None => { + // As mode is set to warp sync, but no warp sync strategy is active, this means + // that warp sync has already finished / was skipped. 
+ warn!( + target: LOG_TARGET, + "Discarding warp sync target, as warp sync was seemingly skipped due \ + to node being (partially) synced.", + ); + Ok(()) + }, }, - None => { + _ => { error!( target: LOG_TARGET, - "Cannot set warp sync target block: no warp sync strategy is active." + "Cannot set warp sync target block: not in warp sync mode." ); debug_assert!(false); Err(()) @@ -587,3 +599,63 @@ where } } } + +#[cfg(test)] +mod test { + use super::*; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderBuilder; + use substrate_test_runtime_client::{ + ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// Regression test for crash when starting already synced parachain node with `--sync=warp`. + /// We must remove this after setting of warp sync target block is moved to initialization of + /// `SyncingEngine` (issue https://github.com/paritytech/polkadot-sdk/issues/3537). + #[test] + fn set_target_block_finished_warp_sync() { + // Populate database with finalized state. + let mut client = Arc::new(TestClientBuilder::new().build()); + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + let just = (*b"TEST", Vec::new()); + client.finalize_block(block.hash(), Some(just)).unwrap(); + let target_block = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().best_hash) + .with_parent_block_number(client.chain_info().best_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + + // Initialize syncing strategy. + let config = SyncingConfig { + mode: SyncMode::Warp, + max_parallel_downloads: 3, + max_blocks_per_request: 64, + metrics_registry: None, + }; + let mut strategy = + SyncingStrategy::new(config, client, Some(WarpSyncConfig::WaitForTarget)).unwrap(); + + // Warp sync instantly finishes as we have finalized state in DB. + let actions = strategy.actions().unwrap(); + assert_eq!(actions.len(), 1); + assert!(matches!(actions[0], SyncingAction::Finished)); + assert!(strategy.warp.is_none()); + + // Try setting the target block. We mustn't crash. + strategy + .set_warp_sync_target_block_header(target_block.header().clone()) + .unwrap(); + } +} -- GitLab From 6f81a4a09270a5930b8786e2dc765347078b4d4a Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Sat, 2 Mar 2024 01:23:21 +1300 Subject: [PATCH 30/86] make SelfParaId a metadata constant (#3517) expose para id via metadata #2116 is blocked by this --- cumulus/pallets/parachain-system/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 3439227bc5b..81965a2a313 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -205,6 +205,7 @@ pub mod pallet { type OnSystemEvent: OnSystemEvent; /// Returns the parachain ID we are running with. + #[pallet::constant] type SelfParaId: Get; /// The place where outbound XCMP messages come from. This is queried in `finalize_block`. 
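A note on the `#[pallet::constant]` attribute added above: it only asks the pallet macro to record the value returned by the configured `Get` implementation in the pallet's metadata; runtime code reads it exactly as before. The following is a plain-Rust sketch of that `Get` pattern, with the trait re-declared locally and a made-up para id, so it is an illustration rather than FRAME code.

```rust
// Local stand-in for `sp_core::Get`; the real `SelfParaId` implements
// `Get<ParaId>` in the runtime configuration.
trait Get<T> {
    fn get() -> T;
}

// Hypothetical configuration value, for the sketch only.
struct SelfParaId;
impl Get<u32> for SelfParaId {
    fn get() -> u32 {
        2_000
    }
}

fn main() {
    // With `#[pallet::constant]`, this value additionally shows up as a
    // constant in the pallet's metadata; reading it is unchanged.
    println!("self para id: {}", SelfParaId::get());
}
```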
-- GitLab From f0e589d72e5228472c7658c525e0ff80d122d777 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Fri, 1 Mar 2024 15:30:43 +0100 Subject: [PATCH 31/86] subsystem-bench: add regression tests for availability read and write (#3311) ### What's been done - `subsystem-bench` has been split into two parts: a CLI benchmark runner and a library. - The CLI runner is quite simple. It just allows us to run `.yaml`-based test sequences and should now only be used to run benchmarks during development. - The library is used in the CLI runner and in regression tests. Some code was changed to make the library independent of the runner. - Added the first regression tests for availability read and write, replicating the existing test sequences. ### How we run regression tests - Regression tests are simply Rust integration tests run without a test harness. - They should only be compiled under the `subsystem-benchmarks` feature to prevent them from running alongside other tests. - This doesn't work when running tests with `nextest` in CI, so additional filters have been added to the `nextest` runs. - Each benchmark run takes a different amount of time at the beginning, so we "warm up" the tests until their CPU usage differs by only 1%. - After the warm-up, we run the benchmarks a few more times and compare the average against the expected value within a given precision. ### What is still wrong? - I haven't managed to set up approval-voting tests. The spread of their results is too large and can't be narrowed down in a reasonable amount of time during the warm-up phase. - The tests start an unconfigurable Prometheus endpoint inside, which causes errors because they all use the same port 9999. I disable it with a flag, but I think it's better to move the endpoint launching outside the tests, as we already do with `valgrind` and `pyroscope`. But we still use `prometheus` inside the tests.
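The warm-up and comparison logic described above boils down to a relative-tolerance check. The helper below is a minimal sketch of that check with a made-up name; in the patch itself the real checks live on `BenchmarkUsage` (`check_cpu_usage`, `check_network_usage`, `cpu_usage_diff`), but the `(expected, precision)` semantics mirror the triplets used in the new regression tests.

```rust
// A measurement passes if it deviates from the expected value by at most
// `precision`, expressed as a ratio of the expected value.
fn within_precision(measured: f64, expected: f64, precision: f64) -> bool {
    (measured - expected).abs() <= expected * precision
}

fn main() {
    // E.g. availability-recovery CPU time: expected 3.850 s with 5% tolerance.
    assert!(within_precision(3.9, 3.850, 0.05));
    assert!(!within_precision(4.5, 3.850, 0.05));
}
```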
### Future work * https://github.com/paritytech/polkadot-sdk/issues/3528 * https://github.com/paritytech/polkadot-sdk/issues/3529 * https://github.com/paritytech/polkadot-sdk/issues/3530 * https://github.com/paritytech/polkadot-sdk/issues/3531 --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> --- .gitlab/pipeline/test.yml | 3 +- Cargo.lock | 2 + .../availability-distribution/Cargo.toml | 11 ++ ...ilability-distribution-regression-bench.rs | 113 +++++++++++++ .../network/availability-recovery/Cargo.toml | 7 + .../availability-recovery-regression-bench.rs | 103 ++++++++++++ polkadot/node/subsystem-bench/Cargo.toml | 6 +- polkadot/node/subsystem-bench/README.md | 146 +++++++++-------- .../examples/approvals_no_shows.yaml | 6 +- .../examples/approvals_throughput.yaml | 3 +- .../approvals_throughput_best_case.yaml | 2 +- ...s_throughput_no_optimisations_enabled.yaml | 2 +- .../subsystem-bench/src/availability/cli.rs | 37 ----- .../src/{ => cli}/subsystem-bench.rs | 149 ++++++++---------- .../subsystem-bench/src/{ => cli}/valgrind.rs | 0 .../src/{ => lib}/approval/helpers.rs | 2 +- .../{ => lib}/approval/message_generator.rs | 19 +-- .../approval/mock_chain_selection.rs | 0 .../src/{ => lib}/approval/mod.rs | 50 +++--- .../src/{ => lib}/approval/test_message.rs | 5 +- .../availability/av_store_helpers.rs | 2 +- .../src/{ => lib}/availability/mod.rs | 70 ++++---- .../src/{core => lib}/configuration.rs | 113 ++++++------- .../src/{core => lib}/display.rs | 28 ++-- .../src/{core => lib}/environment.rs | 78 +++------ .../src/{core => lib}/keyring.rs | 0 .../src/{core/mod.rs => lib/lib.rs} | 7 +- .../src/{core => lib}/mock/av_store.rs | 2 +- .../src/{core => lib}/mock/chain_api.rs | 0 .../src/{core => lib}/mock/dummy.rs | 0 .../src/{core => lib}/mock/mod.rs | 4 +- .../src/{core => lib}/mock/network_bridge.rs | 2 +- .../src/{core => lib}/mock/runtime_api.rs | 2 +- .../src/{core => lib}/network.rs | 2 +- .../node/subsystem-bench/src/lib/usage.rs | 146 +++++++++++++++++ 35 files changed, 711 insertions(+), 411 deletions(-) create mode 100644 polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs create mode 100644 polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs delete mode 100644 polkadot/node/subsystem-bench/src/availability/cli.rs rename polkadot/node/subsystem-bench/src/{ => cli}/subsystem-bench.rs (62%) rename polkadot/node/subsystem-bench/src/{ => cli}/valgrind.rs (100%) rename polkadot/node/subsystem-bench/src/{ => lib}/approval/helpers.rs (99%) rename polkadot/node/subsystem-bench/src/{ => lib}/approval/message_generator.rs (97%) rename polkadot/node/subsystem-bench/src/{ => lib}/approval/mock_chain_selection.rs (100%) rename polkadot/node/subsystem-bench/src/{ => lib}/approval/mod.rs (97%) rename polkadot/node/subsystem-bench/src/{ => lib}/approval/test_message.rs (98%) rename polkadot/node/subsystem-bench/src/{ => lib}/availability/av_store_helpers.rs (94%) rename polkadot/node/subsystem-bench/src/{ => lib}/availability/mod.rs (94%) rename polkadot/node/subsystem-bench/src/{core => lib}/configuration.rs (83%) rename polkadot/node/subsystem-bench/src/{core => lib}/display.rs (89%) rename polkadot/node/subsystem-bench/src/{core => lib}/environment.rs (88%) rename polkadot/node/subsystem-bench/src/{core => lib}/keyring.rs (100%) rename polkadot/node/subsystem-bench/src/{core/mod.rs => lib/lib.rs} (88%) rename polkadot/node/subsystem-bench/src/{core => 
lib}/mock/av_store.rs (99%) rename polkadot/node/subsystem-bench/src/{core => lib}/mock/chain_api.rs (100%) rename polkadot/node/subsystem-bench/src/{core => lib}/mock/dummy.rs (100%) rename polkadot/node/subsystem-bench/src/{core => lib}/mock/mod.rs (97%) rename polkadot/node/subsystem-bench/src/{core => lib}/mock/network_bridge.rs (99%) rename polkadot/node/subsystem-bench/src/{core => lib}/mock/runtime_api.rs (99%) rename polkadot/node/subsystem-bench/src/{core => lib}/network.rs (99%) create mode 100644 polkadot/node/subsystem-bench/src/lib/usage.rs diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 9f774aab271..5c41a3e6e08 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -25,6 +25,7 @@ test-linux-stable: # "upgrade_version_checks_should_work" is currently failing - | time cargo nextest run \ + --filter-expr 'not deps(/polkadot-subsystem-bench/)' \ --workspace \ --locked \ --release \ @@ -69,7 +70,7 @@ test-linux-stable-runtime-benchmarks: # but still want to have debug assertions. RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - - time cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + - time cargo nextest run --filter-expr 'not deps(/polkadot-subsystem-bench/)' --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet # can be used to run all tests # test-linux-stable-all: diff --git a/Cargo.lock b/Cargo.lock index 991541ee84e..b12006bbcf7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12269,6 +12269,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand", "sc-network", "schnellru", @@ -12300,6 +12301,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand", "sc-network", "schnellru", diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 432501ed23f..182d92cb163 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -36,3 +36,14 @@ sc-network = { path = "../../../../substrate/client/network" } futures-timer = "3.0.2" assert_matches = "1.4.0" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + + +[[test]] +name = "availability-distribution-regression-bench" +path = "tests/availability-distribution-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs new file mode 100644 index 00000000000..f2872f3c72b --- /dev/null +++ b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs @@ -0,0 +1,113 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! availability-read regression tests +//! +//! TODO: Explain the test case after configuration adjusted to Kusama +//! +//! Subsystems involved: +//! - availability-distribution +//! - bitfield-distribution +//! - availability-store + +use polkadot_subsystem_bench::{ + availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState}, + configuration::{PeerLatency, TestConfiguration}, + usage::BenchmarkUsage, +}; + +const BENCH_COUNT: usize = 3; +const WARM_UP_COUNT: usize = 20; +const WARM_UP_PRECISION: f64 = 0.01; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + + // TODO: Adjust the test configurations to Kusama values + let mut config = TestConfiguration::default(); + config.latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }); + config.n_validators = 1000; + config.n_cores = 200; + config.max_validators_per_core = 5; + config.min_pov_size = 5120; + config.max_pov_size = 5120; + config.peer_bandwidth = 52428800; + config.bandwidth = 52428800; + config.connectivity = 75; + config.num_blocks = 3; + config.generate_pov_sizes(); + + warm_up(config.clone())?; + let usage = benchmark(config.clone()); + + messages.extend(usage.check_network_usage(&[ + ("Received from peers", 4330.0, 0.05), + ("Sent to peers", 15900.0, 0.05), + ])); + messages.extend(usage.check_cpu_usage(&[ + ("availability-distribution", 0.025, 0.05), + ("bitfield-distribution", 0.085, 0.05), + ("availability-store", 0.180, 0.05), + ])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} + +fn warm_up(config: TestConfiguration) -> Result<(), String> { + println!("Warming up..."); + let mut prev_run: Option = None; + for _ in 0..WARM_UP_COUNT { + let curr = run(config.clone()); + if let Some(ref prev) = prev_run { + let av_distr_diff = + curr.cpu_usage_diff(prev, "availability-distribution").expect("Must exist"); + let bitf_distr_diff = + curr.cpu_usage_diff(prev, "bitfield-distribution").expect("Must exist"); + let av_store_diff = + curr.cpu_usage_diff(prev, "availability-store").expect("Must exist"); + if av_distr_diff < WARM_UP_PRECISION && + bitf_distr_diff < WARM_UP_PRECISION && + av_store_diff < WARM_UP_PRECISION + { + return Ok(()) + } + } + prev_run = Some(curr); + } + + Err("Can't warm up".to_string()) +} + +fn benchmark(config: TestConfiguration) -> BenchmarkUsage { + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT).map(|_| run(config.clone())).collect(); + let usage = BenchmarkUsage::average(&usages); + println!("{}", usage); + usage +} + +fn run(config: TestConfiguration) -> BenchmarkUsage { + let mut state = TestState::new(&config); + let (mut env, _protocol_config) = + prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false); + env.runtime() + .block_on(benchmark_availability_write("data_availability_write", &mut env, state)) +} diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 9eddf5c86d2..12b6ce7a057 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ 
b/polkadot/node/network/availability-recovery/Cargo.toml @@ -41,6 +41,13 @@ sc-network = { path = "../../../../substrate/client/network" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + +[[test]] +name = "availability-recovery-regression-bench" +path = "tests/availability-recovery-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] [features] subsystem-benchmarks = [] diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs new file mode 100644 index 00000000000..beb063e7ae0 --- /dev/null +++ b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs @@ -0,0 +1,103 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! availability-write regression tests +//! +//! TODO: Explain the test case after configuration adjusted to Kusama +//! +//! Subsystems involved: +//! 
- availability-recovery + +use polkadot_subsystem_bench::{ + availability::{ + benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, + TestDataAvailability, TestState, + }, + configuration::{PeerLatency, TestConfiguration}, + usage::BenchmarkUsage, +}; + +const BENCH_COUNT: usize = 3; +const WARM_UP_COUNT: usize = 10; +const WARM_UP_PRECISION: f64 = 0.01; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + + // TODO: Adjust the test configurations to Kusama values + let options = DataAvailabilityReadOptions { fetch_from_backers: true }; + let mut config = TestConfiguration::default(); + config.latency = Some(PeerLatency { mean_latency_ms: 100, std_dev: 1.0 }); + config.n_validators = 300; + config.n_cores = 20; + config.min_pov_size = 5120; + config.max_pov_size = 5120; + config.peer_bandwidth = 52428800; + config.bandwidth = 52428800; + config.num_blocks = 3; + config.connectivity = 90; + config.generate_pov_sizes(); + + warm_up(config.clone(), options.clone())?; + let usage = benchmark(config.clone(), options.clone()); + + messages.extend(usage.check_network_usage(&[ + ("Received from peers", 102400.000, 0.05), + ("Sent to peers", 0.335, 0.05), + ])); + messages.extend(usage.check_cpu_usage(&[("availability-recovery", 3.850, 0.05)])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} + +fn warm_up(config: TestConfiguration, options: DataAvailabilityReadOptions) -> Result<(), String> { + println!("Warming up..."); + let mut prev_run: Option = None; + for _ in 0..WARM_UP_COUNT { + let curr = run(config.clone(), options.clone()); + if let Some(ref prev) = prev_run { + let diff = curr.cpu_usage_diff(prev, "availability-recovery").expect("Must exist"); + if diff < WARM_UP_PRECISION { + return Ok(()) + } + } + prev_run = Some(curr); + } + + Err("Can't warm up".to_string()) +} + +fn benchmark(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage { + println!("Benchmarking..."); + let usages: Vec = + (0..BENCH_COUNT).map(|_| run(config.clone(), options.clone())).collect(); + let usage = BenchmarkUsage::average(&usages); + println!("{}", usage); + usage +} + +fn run(config: TestConfiguration, options: DataAvailabilityReadOptions) -> BenchmarkUsage { + let mut state = TestState::new(&config); + let (mut env, _protocol_config) = + prepare_test(config.clone(), &mut state, TestDataAvailability::Read(options), false); + env.runtime() + .block_on(benchmark_availability_read("data_availability_read", &mut env, state)) +} diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 7de91d8cd5d..726e7de4587 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -8,9 +8,13 @@ license.workspace = true readme = "README.md" publish = false +[lib] +name = "polkadot_subsystem_bench" +path = "src/lib/lib.rs" + [[bin]] name = "subsystem-bench" -path = "src/subsystem-bench.rs" +path = "src/cli/subsystem-bench.rs" # Prevent rustdoc error. Already documented from top-level Cargo.toml. doc = false diff --git a/polkadot/node/subsystem-bench/README.md b/polkadot/node/subsystem-bench/README.md index e090a0392cb..3aac2810ad5 100644 --- a/polkadot/node/subsystem-bench/README.md +++ b/polkadot/node/subsystem-bench/README.md @@ -1,6 +1,6 @@ # Subsystem benchmark client -Run parachain consensus stress and performance tests on your development machine. 
+Run parachain consensus stress and performance tests on your development machine or in CI. ## Motivation @@ -32,7 +32,8 @@ a local Grafana/Prometheus stack is needed. ### Run Prometheus, Pyroscope and Graphana in Docker -If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana on your machine. +If docker is not usable, then follow the next sections to manually install Prometheus, Pyroscope and Graphana +on your machine. ```bash cd polkadot/node/subsystem-bench/docker @@ -95,39 +96,16 @@ If you are running the servers in Docker, use the following URLs: Follow [this guide](https://grafana.com/docs/grafana/latest/dashboards/manage-dashboards/#export-and-import-dashboards) to import the dashboards from the repository `grafana` folder. -## How to run a test - -To run a test, you need to first choose a test objective. Currently, we support the following: - -``` -target/testnet/subsystem-bench --help -The almighty Subsystem Benchmark Toolℒ️ - -Usage: subsystem-bench [OPTIONS] - -Commands: - data-availability-read Benchmark availability recovery strategies +### Standard test options ``` +$ subsystem-bench --help +Usage: subsystem-bench [OPTIONS] -Note: `test-sequence` is a special test objective that wraps up an arbitrary number of test objectives. It is tipically - used to run a suite of tests defined in a `yaml` file like in this [example](examples/availability_read.yaml). +Arguments: + Path to the test sequence configuration file -### Standard test options - -``` - --network The type of network to be emulated [default: ideal] [possible values: ideal, healthy, - degraded] - --n-cores Number of cores to fetch availability for [default: 100] - --n-validators Number of validators to fetch chunks from [default: 500] - --min-pov-size The minimum pov size in KiB [default: 5120] - --max-pov-size The maximum pov size bytes [default: 5120] - -n, --num-blocks The number of blocks the test is going to run [default: 1] - -p, --peer-bandwidth The bandwidth of emulated remote peers in KiB - -b, --bandwidth The bandwidth of our node in KiB - --connectivity Emulated peer connection ratio [0-100] - --peer-mean-latency Mean remote peer latency in milliseconds [0-5000] - --peer-latency-std-dev Remote peer latency standard deviation +Options: --profile Enable CPU Profiling with Pyroscope --pyroscope-url Pyroscope Server URL [default: http://localhost:4040] --pyroscope-sample-rate Pyroscope Sample Rate [default: 113] @@ -135,27 +113,17 @@ Note: `test-sequence` is a special test objective that wraps up an arbitrary num -h, --help Print help ``` -These apply to all test objectives, except `test-sequence` which relies on the values being specified in a file. - -### Test objectives - -Each test objective can have it's specific configuration options, in contrast with the standard test options. +## How to run a test -For `data-availability-read` the recovery strategy to be used is configurable. +To run a test, you need to use a path to a test objective: ``` -target/testnet/subsystem-bench data-availability-read --help -Benchmark availability recovery strategies - -Usage: subsystem-bench data-availability-read [OPTIONS] - -Options: - -f, --fetch-from-backers Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU - as we don't need to re-construct from chunks. 
Tipically this is only faster if nodes - have enough bandwidth - -h, --help Print help +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_read.yaml ``` +Note: test objectives may be wrapped up into a test sequence. +It is tipically used to run a suite of tests like in this [example](examples/availability_read.yaml). + ### Understanding the test configuration A single test configuration `TestConfiguration` struct applies to a single run of a certain test objective. @@ -175,36 +143,65 @@ the test is started. ### Example run -Let's run an availabilty read test which will recover availability for 10 cores with max PoV size on a 500 +Let's run an availabilty read test which will recover availability for 200 cores with max PoV size on a 1000 node validator network. + + ``` - target/testnet/subsystem-bench --n-cores 10 data-availability-read -[2023-11-28T09:01:59Z INFO subsystem_bench::core::display] n_validators = 500, n_cores = 10, pov_size = 5120 - 5120, - latency = None -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Created test environment. -[2023-11-28T09:01:59Z INFO subsystem-bench::availability] Pre-generating 10 candidates. -[2023-11-28T09:02:01Z INFO subsystem-bench::core] Initializing network emulation for 500 peers. -[2023-11-28T09:02:01Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 -[2023-11-28T09:02:01Z INFO subsystem-bench::availability] Current block 1/1 -[2023-11-28T09:02:01Z INFO subsystem_bench::availability] 10 recoveries pending -[2023-11-28T09:02:04Z INFO subsystem_bench::availability] Block time 3231ms -[2023-11-28T09:02:04Z INFO subsystem-bench::availability] Sleeping till end of block (2768ms) -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] All blocks processed in 6001ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Throughput: 51200 KiB/block -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] Block time: 6001 ms -[2023-11-28T09:02:07Z INFO subsystem_bench::availability] - - Total received from network: 66 MiB - Total sent to network: 58 KiB - Total subsystem CPU usage 4.16s - CPU usage per block 4.16s - Total test environment CPU usage 0.00s - CPU usage per block 0.00s +target/testnet/subsystem-bench polkadot/node/subsystem-bench/examples/availability_write.yaml +[2024-02-19T14:10:32.981Z INFO subsystem_bench] Sequence contains 1 step(s) +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] Step 1/1 +[2024-02-19T14:10:32.981Z INFO subsystem-bench::cli] [objective = DataAvailabilityWrite] n_validators = 1000, n_cores = 200, pov_size = 5120 - 5120, connectivity = 75, latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:32.982Z INFO subsystem-bench::availability] Generating template candidate index=0 pov_size=5242880 +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Created test environment. +[2024-02-19T14:10:33.106Z INFO subsystem-bench::availability] Pre-generating 600 candidates. +[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] Initializing emulation for a 1000 peer network. 
+[2024-02-19T14:10:34.096Z INFO subsystem-bench::network] connectivity 75%, latency Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) +[2024-02-19T14:10:34.098Z INFO subsystem-bench::network] Network created, connected validator count 749 +[2024-02-19T14:10:34.099Z INFO subsystem-bench::availability] Seeding availability store with candidates ... +[2024-02-19T14:10:34.100Z INFO substrate_prometheus_endpoint] 〽️ Prometheus exporter started at 127.0.0.1:9999 +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Done +[2024-02-19T14:10:34.387Z INFO subsystem-bench::availability] Current block #1 +[2024-02-19T14:10:34.389Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:34.625Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:34.626Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] All work for block completed in 1322ms +[2024-02-19T14:10:35.710Z INFO subsystem-bench::availability] Current block #2 +[2024-02-19T14:10:35.712Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:35.947Z INFO subsystem-bench::availability] All chunks received in 236ms +[2024-02-19T14:10:35.947Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:37.038Z INFO subsystem-bench::availability] All work for block completed in 1328ms +[2024-02-19T14:10:37.039Z INFO subsystem-bench::availability] Current block #3 +[2024-02-19T14:10:37.040Z INFO subsystem-bench::availability] Waiting for all emulated peers to receive their chunk from us ... +[2024-02-19T14:10:37.276Z INFO subsystem-bench::availability] All chunks received in 237ms +[2024-02-19T14:10:37.276Z INFO polkadot_subsystem_bench::availability] Waiting for 749 bitfields to be received and processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All bitfields processed +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All work for block completed in 1323ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] All blocks processed in 3974ms +[2024-02-19T14:10:38.362Z INFO subsystem-bench::availability] Avg block time: 1324 ms +[2024-02-19T14:10:38.362Z INFO parachain::availability-store] received `Conclude` signal, exiting +[2024-02-19T14:10:38.362Z INFO parachain::bitfield-distribution] Conclude +[2024-02-19T14:10:38.362Z INFO subsystem-bench::network] Downlink channel closed, network interface task exiting + +polkadot/node/subsystem-bench/examples/availability_write.yaml #1 DataAvailabilityWrite + +Network usage, KiB total per block +Received from peers 12922.000 4307.333 +Sent to peers 47705.000 15901.667 + +CPU usage, seconds total per block +availability-distribution 0.045 0.015 +bitfield-distribution 0.104 0.035 +availability-store 0.304 0.101 +Test environment 3.213 1.071 ``` -`Block time` in the context of `data-availability-read` has a different meaning. It measures the amount of time it + + +`Block time` in the current context has a different meaning. 
It measures the amount of time it took the subsystem to finish processing all of the messages sent in the context of the current test block. ### Test logs @@ -233,8 +230,9 @@ Since the execution will be very slow, it's recommended not to run it together w benchmark results into account. A report is saved in a file `cachegrind_report.txt`. Example run results: + ``` -$ target/testnet/subsystem-bench --n-cores 10 --cache-misses data-availability-read +$ target/testnet/subsystem-bench --cache-misses cache-misses-data-availability-read.yaml $ cat cachegrind_report.txt I refs: 64,622,081,485 I1 misses: 3,018,168 @@ -275,7 +273,7 @@ happy and negative scenarios (low bandwidth, network errors and low connectivity To faster write a new test objective you need to use some higher level wrappers and logic: `TestEnvironment`, `TestConfiguration`, `TestAuthorities`, `NetworkEmulator`. To create the `TestEnvironment` you will -need to also build an `Overseer`, but that should be easy using the mockups for subsystems in`core::mock`. +need to also build an `Overseer`, but that should be easy using the mockups for subsystems in `mock`. ### Mocking diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index 758c7fbbf11..146da57d44c 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -1,14 +1,14 @@ TestConfiguration: # Test 1 - objective: !ApprovalVoting - last_considered_tranche: 89 coalesce_mean: 3.0 coalesce_std_dev: 1.0 + enable_assignments_v2: true + last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" - enable_assignments_v2: true num_no_shows_per_candidate: 10 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 9eeeefc53a4..6b17e62c20a 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -7,11 +7,10 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp" n_validators: 500 n_cores: 100 - n_included_candidates: 100 min_pov_size: 1120 max_pov_size: 5120 peer_bandwidth: 524288000000 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index 370bb31a5c4..e946c28e8ef 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: true coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml index 30b9ac8dc50..8f4b050e72f 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml +++ 
b/polkadot/node/subsystem-bench/examples/approvals_throughput_no_optimisations_enabled.yaml @@ -7,8 +7,8 @@ TestConfiguration: last_considered_tranche: 89 stop_when_approved: false coalesce_tranche_diff: 12 - workdir_prefix: "/tmp/" num_no_shows_per_candidate: 0 + workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/src/availability/cli.rs b/polkadot/node/subsystem-bench/src/availability/cli.rs deleted file mode 100644 index 65df8c1552a..00000000000 --- a/polkadot/node/subsystem-bench/src/availability/cli.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use serde::{Deserialize, Serialize}; - -#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] -#[value(rename_all = "kebab-case")] -#[non_exhaustive] -pub enum NetworkEmulation { - Ideal, - Healthy, - Degraded, -} - -#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] -#[clap(rename_all = "kebab-case")] -#[allow(missing_docs)] -pub struct DataAvailabilityReadOptions { - #[clap(short, long, default_value_t = false)] - /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have - /// enough bandwidth. - pub fetch_from_backers: bool, -} diff --git a/polkadot/node/subsystem-bench/src/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs similarity index 62% rename from polkadot/node/subsystem-bench/src/subsystem-bench.rs rename to polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 0803f175474..deb351360d7 100644 --- a/polkadot/node/subsystem-bench/src/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -14,54 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tool for running subsystem benchmark tests designed for development and -//! CI regression testing. - -use approval::{bench_approvals, ApprovalsOptions}; -use availability::{ - cli::{DataAvailabilityReadOptions, NetworkEmulation}, - prepare_test, TestState, -}; +//! A tool for running subsystem benchmark tests +//! designed for development and CI regression testing. 
+ use clap::Parser; -use clap_num::number_range; use color_eyre::eyre; use colored::Colorize; -use core::{ - configuration::TestConfiguration, - display::display_configuration, - environment::{TestEnvironment, GENESIS_HASH}, -}; +use polkadot_subsystem_bench::{approval, availability, configuration}; use pyroscope::PyroscopeAgent; use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use serde::{Deserialize, Serialize}; use std::path::Path; -mod approval; -mod availability; -mod core; mod valgrind; -const LOG_TARGET: &str = "subsystem-bench"; - -fn le_100(s: &str) -> Result { - number_range(s, 0, 100) -} - -fn le_5000(s: &str) -> Result { - number_range(s, 0, 5000) -} +const LOG_TARGET: &str = "subsystem-bench::cli"; /// Supported test objectives #[derive(Debug, Clone, Parser, Serialize, Deserialize)] #[command(rename_all = "kebab-case")] pub enum TestObjective { /// Benchmark availability recovery strategies. - DataAvailabilityRead(DataAvailabilityReadOptions), + DataAvailabilityRead(availability::DataAvailabilityReadOptions), /// Benchmark availability and bitfield distribution. DataAvailabilityWrite, /// Benchmark the approval-voting and approval-distribution subsystems. - ApprovalVoting(ApprovalsOptions), - Unimplemented, + ApprovalVoting(approval::ApprovalsOptions), } impl std::fmt::Display for TestObjective { @@ -73,39 +51,37 @@ impl std::fmt::Display for TestObjective { Self::DataAvailabilityRead(_) => "DataAvailabilityRead", Self::DataAvailabilityWrite => "DataAvailabilityWrite", Self::ApprovalVoting(_) => "ApprovalVoting", - Self::Unimplemented => "Unimplemented", } ) } } -#[derive(Debug, Parser)] -#[allow(missing_docs)] -struct BenchCli { - #[arg(long, value_enum, ignore_case = true, default_value_t = NetworkEmulation::Ideal)] - /// The type of network to be emulated - pub network: NetworkEmulation, - - #[clap(short, long)] - /// The bandwidth of emulated remote peers in KiB - pub peer_bandwidth: Option, - - #[clap(short, long)] - /// The bandwidth of our node in KiB - pub bandwidth: Option, - - #[clap(long, value_parser=le_100)] - /// Emulated peer connection ratio [0-100]. - pub connectivity: Option, +/// The test input parameters +#[derive(Clone, Debug, Serialize, Deserialize)] +struct CliTestConfiguration { + /// Test Objective + pub objective: TestObjective, + /// Test Configuration + #[serde(flatten)] + pub test_config: configuration::TestConfiguration, +} - #[clap(long, value_parser=le_5000)] - /// Mean remote peer latency in milliseconds [0-5000]. - pub peer_mean_latency: Option, +#[derive(Serialize, Deserialize)] +pub struct TestSequence { + #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] + test_configurations: Vec, +} - #[clap(long, value_parser=le_5000)] - /// Remote peer latency standard deviation - pub peer_latency_std_dev: Option, +impl TestSequence { + fn new_from_file(path: &Path) -> std::io::Result { + let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); + Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) + } +} +#[derive(Debug, Parser)] +#[allow(missing_docs)] +struct BenchCli { #[clap(long, default_value_t = false)] /// Enable CPU Profiling with Pyroscope pub profile: bool, @@ -122,10 +98,6 @@ struct BenchCli { /// Enable Cache Misses Profiling with Valgrind. 
Linux only, Valgrind must be in the PATH pub cache_misses: bool, - #[clap(long, default_value_t = false)] - /// Shows the output in YAML format - pub yaml_output: bool, - #[arg(required = true)] /// Path to the test sequence configuration file pub path: String, @@ -148,49 +120,60 @@ impl BenchCli { None }; - let test_sequence = core::configuration::TestSequence::new_from_file(Path::new(&self.path)) + let test_sequence = TestSequence::new_from_file(Path::new(&self.path)) .expect("File exists") - .into_vec(); + .test_configurations; let num_steps = test_sequence.len(); gum::info!("{}", format!("Sequence contains {} step(s)", num_steps).bright_purple()); - for (index, test_config) in test_sequence.into_iter().enumerate() { - let benchmark_name = format!("{} #{} {}", &self.path, index + 1, test_config.objective); - gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); - display_configuration(&test_config); - let usage = match test_config.objective { - TestObjective::DataAvailabilityRead(ref _opts) => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + for (index, CliTestConfiguration { objective, mut test_config }) in + test_sequence.into_iter().enumerate() + { + let benchmark_name = format!("{} #{} {}", &self.path, index + 1, objective); + gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),); + gum::info!(target: LOG_TARGET, "[{}] {}", format!("objective = {:?}", objective).green(), test_config); + test_config.generate_pov_sizes(); + + let usage = match objective { + TestObjective::DataAvailabilityRead(opts) => { + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Read(opts), + true, + ); env.runtime().block_on(availability::benchmark_availability_read( &benchmark_name, &mut env, state, )) }, - TestObjective::ApprovalVoting(ref options) => { - let (mut env, state) = - approval::prepare_test(test_config.clone(), options.clone()); - env.runtime().block_on(bench_approvals(&benchmark_name, &mut env, state)) - }, TestObjective::DataAvailabilityWrite => { - let mut state = TestState::new(&test_config); - let (mut env, _protocol_config) = prepare_test(test_config, &mut state); + let mut state = availability::TestState::new(&test_config); + let (mut env, _protocol_config) = availability::prepare_test( + test_config, + &mut state, + availability::TestDataAvailability::Write, + true, + ); env.runtime().block_on(availability::benchmark_availability_write( &benchmark_name, &mut env, state, )) }, - TestObjective::Unimplemented => todo!(), - }; - - let output = if self.yaml_output { - serde_yaml::to_string(&vec![usage])? 
- } else { - usage.to_string() + TestObjective::ApprovalVoting(ref options) => { + let (mut env, state) = + approval::prepare_test(test_config.clone(), options.clone(), true); + env.runtime().block_on(approval::bench_approvals( + &benchmark_name, + &mut env, + state, + )) + }, }; - println!("{}", output); + println!("{}", usage); } if let Some(agent_running) = agent_running { diff --git a/polkadot/node/subsystem-bench/src/valgrind.rs b/polkadot/node/subsystem-bench/src/cli/valgrind.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/valgrind.rs rename to polkadot/node/subsystem-bench/src/cli/valgrind.rs diff --git a/polkadot/node/subsystem-bench/src/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/approval/helpers.rs rename to polkadot/node/subsystem-bench/src/lib/approval/helpers.rs index 623d91848f5..af5ff5aa1fa 100644 --- a/polkadot/node/subsystem-bench/src/approval/helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::configuration::TestAuthorities; +use crate::configuration::TestAuthorities; use itertools::Itertools; use polkadot_node_core_approval_voting::time::{Clock, SystemClock, Tick}; use polkadot_node_network_protocol::{ diff --git a/polkadot/node/subsystem-bench/src/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/message_generator.rs rename to polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index a7103401324..c1b31a509f6 100644 --- a/polkadot/node/subsystem-bench/src/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -18,15 +18,12 @@ use crate::{ approval::{ helpers::{generate_babe_epoch, generate_topology}, test_message::{MessagesBundle, TestMessageInfo}, - ApprovalTestState, BlockTestData, GeneratedState, BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, - SLOT_DURATION_MILLIS, + ApprovalTestState, ApprovalsOptions, BlockTestData, GeneratedState, + BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS, }, - core::{ - configuration::{TestAuthorities, TestConfiguration}, - mock::runtime_api::session_info_for_peers, - NODE_UNDER_TEST, - }, - ApprovalsOptions, TestObjective, + configuration::{TestAuthorities, TestConfiguration}, + mock::runtime_api::session_info_for_peers, + NODE_UNDER_TEST, }; use futures::SinkExt; use itertools::Itertools; @@ -132,11 +129,7 @@ impl PeerMessagesGenerator { options: &ApprovalsOptions, ) -> String { let mut fingerprint = options.fingerprint(); - let mut exclude_objective = configuration.clone(); - // The objective contains the full content of `ApprovalOptions`, we don't want to put all of - // that in fingerprint, so execlute it because we add it manually see above. 
- exclude_objective.objective = TestObjective::Unimplemented; - let configuration_bytes = bincode::serialize(&exclude_objective).unwrap(); + let configuration_bytes = bincode::serialize(&configuration).unwrap(); fingerprint.extend(configuration_bytes); let mut sha1 = sha1::Sha1::new(); sha1.update(fingerprint); diff --git a/polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs b/polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/approval/mock_chain_selection.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mock_chain_selection.rs diff --git a/polkadot/node/subsystem-bench/src/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/approval/mod.rs rename to polkadot/node/subsystem-bench/src/lib/approval/mod.rs index f07912de188..450faf06123 100644 --- a/polkadot/node/subsystem-bench/src/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -24,25 +24,21 @@ use crate::{ mock_chain_selection::MockChainSelection, test_message::{MessagesBundle, TestMessageInfo}, }, - core::{ - configuration::TestAuthorities, - environment::{ - BenchmarkUsage, TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT, - }, - mock::{ - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, TestSyncOracle, - }, - network::{ - new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, - NetworkInterfaceReceiver, - }, - NODE_UNDER_TEST, + configuration::{TestAuthorities, TestConfiguration}, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, MAX_TIME_OF_FLIGHT}, + mock::{ + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::MockRuntimeApi, + AlwaysSupportsParachains, TestSyncOracle, }, - TestConfiguration, + network::{ + new_network, HandleNetworkMessage, NetworkEmulatorHandle, NetworkInterface, + NetworkInterfaceReceiver, + }, + usage::BenchmarkUsage, + NODE_UNDER_TEST, }; use colored::Colorize; use futures::channel::oneshot; @@ -472,11 +468,9 @@ impl ApprovalTestState { impl HandleNetworkMessage for ApprovalTestState { fn handle( &self, - _message: crate::core::network::NetworkMessage, - _node_sender: &mut futures::channel::mpsc::UnboundedSender< - crate::core::network::NetworkMessage, - >, - ) -> Option { + _message: crate::network::NetworkMessage, + _node_sender: &mut futures::channel::mpsc::UnboundedSender, + ) -> Option { self.total_sent_messages_from_node .as_ref() .fetch_add(1, std::sync::atomic::Ordering::SeqCst); @@ -841,8 +835,14 @@ fn build_overseer( pub fn prepare_test( config: TestConfiguration, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { - prepare_test_inner(config, TestEnvironmentDependencies::default(), options) + prepare_test_inner( + config, + TestEnvironmentDependencies::default(), + options, + with_prometheus_endpoint, + ) } /// Build the test environment for an Approval benchmark. 
@@ -850,6 +850,7 @@ fn prepare_test_inner( config: TestConfiguration, dependencies: TestEnvironmentDependencies, options: ApprovalsOptions, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, ApprovalTestState) { gum::info!("Prepare test state"); let state = ApprovalTestState::new(&config, options, &dependencies); @@ -878,6 +879,7 @@ fn prepare_test_inner( overseer, overseer_handle, state.test_authorities.clone(), + with_prometheus_endpoint, ), state, ) diff --git a/polkadot/node/subsystem-bench/src/approval/test_message.rs b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs similarity index 98% rename from polkadot/node/subsystem-bench/src/approval/test_message.rs rename to polkadot/node/subsystem-bench/src/lib/approval/test_message.rs index 8aaabc3426c..63e383509be 100644 --- a/polkadot/node/subsystem-bench/src/approval/test_message.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/test_message.rs @@ -15,9 +15,8 @@ // along with Polkadot. If not, see . use crate::{ - approval::{BlockTestData, CandidateTestData}, - core::configuration::TestAuthorities, - ApprovalsOptions, + approval::{ApprovalsOptions, BlockTestData, CandidateTestData}, + configuration::TestAuthorities, }; use itertools::Itertools; use parity_scale_codec::{Decode, Encode}; diff --git a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs rename to polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs index 261dbd0376c..3300def2235 100644 --- a/polkadot/node/subsystem-bench/src/availability/av_store_helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::core::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; +use crate::{environment::TestEnvironmentDependencies, mock::TestSyncOracle}; use polkadot_node_core_av_store::{AvailabilityStoreSubsystem, Config}; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_subsystem_util::database::Database; diff --git a/polkadot/node/subsystem-bench/src/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs similarity index 94% rename from polkadot/node/subsystem-bench/src/availability/mod.rs rename to polkadot/node/subsystem-bench/src/lib/availability/mod.rs index ad9a17ff8f4..f012a5a907e 100644 --- a/polkadot/node/subsystem-bench/src/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -15,22 +15,18 @@ // along with Polkadot. If not, see . 
use crate::{ - core::{ - configuration::TestConfiguration, - environment::{BenchmarkUsage, TestEnvironmentDependencies}, - mock::{ - av_store, - av_store::MockAvailabilityStore, - chain_api::{ChainApiState, MockChainApi}, - dummy_builder, - network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api, - runtime_api::MockRuntimeApi, - AlwaysSupportsParachains, - }, - network::new_network, + configuration::TestConfiguration, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + mock::{ + av_store::{self, MockAvailabilityStore}, + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, + runtime_api::{self, MockRuntimeApi}, + AlwaysSupportsParachains, }, - TestEnvironment, TestObjective, GENESIS_HASH, + network::new_network, + usage::BenchmarkUsage, }; use av_store::NetworkAvailabilityState; use av_store_helpers::new_av_store; @@ -73,14 +69,30 @@ use sc_network::{ PeerId, }; use sc_service::SpawnTaskHandle; +use serde::{Deserialize, Serialize}; use sp_core::H256; use std::{collections::HashMap, iter::Cycle, ops::Sub, sync::Arc, time::Instant}; mod av_store_helpers; -pub(crate) mod cli; const LOG_TARGET: &str = "subsystem-bench::availability"; +#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DataAvailabilityReadOptions { + #[clap(short, long, default_value_t = false)] + /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as + /// we don't need to re-construct from chunks. Tipically this is only faster if nodes have + /// enough bandwidth. + pub fetch_from_backers: bool, +} + +pub enum TestDataAvailability { + Read(DataAvailabilityReadOptions), + Write, +} + fn build_overseer_for_availability_read( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, @@ -141,14 +153,24 @@ fn build_overseer_for_availability_write( pub fn prepare_test( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { - prepare_test_inner(config, state, TestEnvironmentDependencies::default()) + prepare_test_inner( + config, + state, + mode, + TestEnvironmentDependencies::default(), + with_prometheus_endpoint, + ) } fn prepare_test_inner( config: TestConfiguration, state: &mut TestState, + mode: TestDataAvailability, dependencies: TestEnvironmentDependencies, + with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { // Generate test authorities. 
let test_authorities = config.generate_authorities(); @@ -216,8 +238,8 @@ fn prepare_test_inner( let network_bridge_rx = network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg.clone())); - let (overseer, overseer_handle) = match &state.config().objective { - TestObjective::DataAvailabilityRead(options) => { + let (overseer, overseer_handle) = match &mode { + TestDataAvailability::Read(options) => { let use_fast_path = options.fetch_from_backers; let subsystem = if use_fast_path { @@ -247,7 +269,7 @@ fn prepare_test_inner( &dependencies, ) }, - TestObjective::DataAvailabilityWrite => { + TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( test_authorities.keyring.keystore(), IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, @@ -284,9 +306,6 @@ fn prepare_test_inner( &dependencies, ) }, - _ => { - unimplemented!("Invalid test objective") - }, }; ( @@ -297,6 +316,7 @@ fn prepare_test_inner( overseer, overseer_handle, test_authorities, + with_prometheus_endpoint, ), req_cfgs, ) @@ -326,10 +346,6 @@ pub struct TestState { } impl TestState { - fn config(&self) -> &TestConfiguration { - &self.config - } - pub fn next_candidate(&mut self) -> Option { let candidate = self.candidates.next(); let candidate_hash = candidate.as_ref().unwrap().hash(); diff --git a/polkadot/node/subsystem-bench/src/core/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs similarity index 83% rename from polkadot/node/subsystem-bench/src/core/configuration.rs rename to polkadot/node/subsystem-bench/src/lib/configuration.rs index 00be2a86b17..c7693308527 100644 --- a/polkadot/node/subsystem-bench/src/core/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -16,7 +16,7 @@ //! Test configuration definition and helpers. -use crate::{core::keyring::Keyring, TestObjective}; +use crate::keyring::Keyring; use itertools::Itertools; use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId}; use rand::thread_rng; @@ -24,17 +24,7 @@ use rand_distr::{Distribution, Normal, Uniform}; use sc_network::PeerId; use serde::{Deserialize, Serialize}; use sp_consensus_babe::AuthorityId; -use std::{collections::HashMap, path::Path}; - -pub fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { - random_uniform_sample(min_pov_size, max_pov_size) -} - -fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { - Uniform::from(min_value.into()..=max_value.into()) - .sample(&mut thread_rng()) - .into() -} +use std::collections::HashMap; /// Peer networking latency configuration. #[derive(Clone, Debug, Default, Serialize, Deserialize)] @@ -87,8 +77,6 @@ fn default_no_show_slots() -> usize { /// The test input parameters #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TestConfiguration { - /// The test objective - pub objective: TestObjective, /// Number of validators pub n_validators: usize, /// Number of cores @@ -115,7 +103,7 @@ pub struct TestConfiguration { pub max_pov_size: usize, /// Randomly sampled pov_sizes #[serde(skip)] - pov_sizes: Vec, + pub pov_sizes: Vec, /// The amount of bandiwdth remote validators have. 
#[serde(default = "default_bandwidth")] pub peer_bandwidth: usize, @@ -133,56 +121,32 @@ pub struct TestConfiguration { pub num_blocks: usize, } -fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { - (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() -} - -#[derive(Serialize, Deserialize)] -pub struct TestSequence { - #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))] - test_configurations: Vec, -} - -impl TestSequence { - pub fn into_vec(self) -> Vec { - self.test_configurations - .into_iter() - .map(|mut config| { - config.pov_sizes = - generate_pov_sizes(config.n_cores, config.min_pov_size, config.max_pov_size); - config - }) - .collect() - } -} - -impl TestSequence { - pub fn new_from_file(path: &Path) -> std::io::Result { - let string = String::from_utf8(std::fs::read(path)?).expect("File is valid UTF8"); - Ok(serde_yaml::from_str(&string).expect("File is valid test sequence YA")) +impl Default for TestConfiguration { + fn default() -> Self { + Self { + n_validators: Default::default(), + n_cores: Default::default(), + needed_approvals: default_needed_approvals(), + zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(), + relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(), + n_delay_tranches: default_n_delay_tranches(), + no_show_slots: default_no_show_slots(), + max_validators_per_core: default_backing_group_size(), + min_pov_size: default_pov_size(), + max_pov_size: default_pov_size(), + pov_sizes: Default::default(), + peer_bandwidth: default_bandwidth(), + bandwidth: default_bandwidth(), + latency: Default::default(), + connectivity: default_connectivity(), + num_blocks: Default::default(), + } } } -/// Helper struct for authority related state. -#[derive(Clone)] -pub struct TestAuthorities { - pub keyring: Keyring, - pub validator_public: Vec, - pub validator_authority_id: Vec, - pub validator_babe_id: Vec, - pub validator_assignment_id: Vec, - pub key_seeds: Vec, - pub peer_ids: Vec, - pub peer_id_to_authority: HashMap, -} - impl TestConfiguration { - #[allow(unused)] - pub fn write_to_disk(&self) { - // Serialize a slice of configurations - let yaml = serde_yaml::to_string(&TestSequence { test_configurations: vec![self.clone()] }) - .unwrap(); - std::fs::write("last_test.yaml", yaml).unwrap(); + pub fn generate_pov_sizes(&mut self) { + self.pov_sizes = generate_pov_sizes(self.n_cores, self.min_pov_size, self.max_pov_size); } pub fn pov_sizes(&self) -> &[usize] { @@ -239,6 +203,33 @@ impl TestConfiguration { } } +fn random_uniform_sample + From>(min_value: T, max_value: T) -> T { + Uniform::from(min_value.into()..=max_value.into()) + .sample(&mut thread_rng()) + .into() +} + +fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize { + random_uniform_sample(min_pov_size, max_pov_size) +} + +fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec { + (0..count).map(|_| random_pov_size(min_kib * 1024, max_kib * 1024)).collect() +} + +/// Helper struct for authority related state. +#[derive(Clone)] +pub struct TestAuthorities { + pub keyring: Keyring, + pub validator_public: Vec, + pub validator_authority_id: Vec, + pub validator_babe_id: Vec, + pub validator_assignment_id: Vec, + pub key_seeds: Vec, + pub peer_ids: Vec, + pub peer_id_to_authority: HashMap, +} + /// Sample latency (in milliseconds) from a normal distribution with parameters /// specified in `maybe_peer_latency`. 
pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> usize { diff --git a/polkadot/node/subsystem-bench/src/core/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs similarity index 89% rename from polkadot/node/subsystem-bench/src/core/display.rs rename to polkadot/node/subsystem-bench/src/lib/display.rs index 13a349382e2..b153d54a7c3 100644 --- a/polkadot/node/subsystem-bench/src/core/display.rs +++ b/polkadot/node/subsystem-bench/src/lib/display.rs @@ -19,7 +19,7 @@ //! //! Currently histogram buckets are skipped. -use crate::{TestConfiguration, LOG_TARGET}; +use crate::configuration::TestConfiguration; use colored::Colorize; use prometheus::{ proto::{MetricFamily, MetricType}, @@ -27,6 +27,8 @@ use prometheus::{ }; use std::fmt::Display; +const LOG_TARGET: &str = "subsystem-bench::display"; + #[derive(Default, Debug)] pub struct MetricCollection(Vec); @@ -85,6 +87,7 @@ impl Display for MetricCollection { Ok(()) } } + #[derive(Debug, Clone)] pub struct TestMetric { name: String, @@ -184,15 +187,16 @@ pub fn parse_metrics(registry: &Registry) -> MetricCollection { test_metrics.into() } -pub fn display_configuration(test_config: &TestConfiguration) { - gum::info!( - "[{}] {}, {}, {}, {}, {}", - format!("objective = {:?}", test_config.objective).green(), - format!("n_validators = {}", test_config.n_validators).blue(), - format!("n_cores = {}", test_config.n_cores).blue(), - format!("pov_size = {} - {}", test_config.min_pov_size, test_config.max_pov_size) - .bright_black(), - format!("connectivity = {}", test_config.connectivity).bright_black(), - format!("latency = {:?}", test_config.latency).bright_black(), - ); +impl Display for TestConfiguration { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}, {}, {}, {}, {}", + format!("n_validators = {}", self.n_validators).blue(), + format!("n_cores = {}", self.n_cores).blue(), + format!("pov_size = {} - {}", self.min_pov_size, self.max_pov_size).bright_black(), + format!("connectivity = {}", self.connectivity).bright_black(), + format!("latency = {:?}", self.latency).bright_black(), + ) + } } diff --git a/polkadot/node/subsystem-bench/src/core/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs similarity index 88% rename from polkadot/node/subsystem-bench/src/core/environment.rs rename to polkadot/node/subsystem-bench/src/lib/environment.rs index ca4c41cf45f..958ed50d089 100644 --- a/polkadot/node/subsystem-bench/src/core/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -17,13 +17,11 @@ //! 
Test environment implementation use crate::{ - core::{ - configuration::TestAuthorities, mock::AlwaysSupportsParachains, - network::NetworkEmulatorHandle, - }, - TestConfiguration, + configuration::{TestAuthorities, TestConfiguration}, + mock::AlwaysSupportsParachains, + network::NetworkEmulatorHandle, + usage::{BenchmarkUsage, ResourceUsage}, }; -use colored::Colorize; use core::time::Duration; use futures::{Future, FutureExt}; use polkadot_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt}; @@ -33,7 +31,6 @@ use polkadot_node_subsystem_util::metrics::prometheus::{ }; use polkadot_overseer::{BlockInfo, Handle as OverseerHandle}; use sc_service::{SpawnTaskHandle, TaskManager}; -use serde::{Deserialize, Serialize}; use std::net::{Ipv4Addr, SocketAddr}; use tokio::runtime::Handle; @@ -204,6 +201,7 @@ impl TestEnvironment { overseer: Overseer, AlwaysSupportsParachains>, overseer_handle: OverseerHandle, authorities: TestAuthorities, + with_prometheus_endpoint: bool, ) -> Self { let metrics = TestEnvironmentMetrics::new(&dependencies.registry) .expect("Metrics need to be registered"); @@ -211,19 +209,21 @@ impl TestEnvironment { let spawn_handle = dependencies.task_manager.spawn_handle(); spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed()); - let registry_clone = dependencies.registry.clone(); - dependencies.task_manager.spawn_handle().spawn_blocking( - "prometheus", - "test-environment", - async move { - prometheus_endpoint::init_prometheus( - SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), - registry_clone, - ) - .await - .unwrap(); - }, - ); + if with_prometheus_endpoint { + let registry_clone = dependencies.registry.clone(); + dependencies.task_manager.spawn_handle().spawn_blocking( + "prometheus", + "test-environment", + async move { + prometheus_endpoint::init_prometheus( + SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999), + registry_clone, + ) + .await + .unwrap(); + }, + ); + } TestEnvironment { runtime_handle: dependencies.runtime.handle().clone(), @@ -411,41 +411,3 @@ impl TestEnvironment { usage } } - -#[derive(Debug, Serialize, Deserialize)] -pub struct BenchmarkUsage { - benchmark_name: String, - network_usage: Vec, - cpu_usage: Vec, -} - -impl std::fmt::Display for BenchmarkUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "\n{}\n\n{}\n{}\n\n{}\n{}\n", - self.benchmark_name.purple(), - format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), - self.network_usage - .iter() - .map(|v| v.to_string()) - .collect::>() - .join("\n"), - format!("{:<32}{:>12}{:>12}", "CPU usage in seconds", "total", "per block").blue(), - self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") - ) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct ResourceUsage { - resource_name: String, - total: f64, - per_block: f64, -} - -impl std::fmt::Display for ResourceUsage { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) - } -} diff --git a/polkadot/node/subsystem-bench/src/core/keyring.rs b/polkadot/node/subsystem-bench/src/lib/keyring.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/keyring.rs rename to polkadot/node/subsystem-bench/src/lib/keyring.rs diff --git a/polkadot/node/subsystem-bench/src/core/mod.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs similarity index 88% rename from 
polkadot/node/subsystem-bench/src/core/mod.rs rename to polkadot/node/subsystem-bench/src/lib/lib.rs index 764184c5b37..d06f2822a89 100644 --- a/polkadot/node/subsystem-bench/src/core/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -15,11 +15,14 @@ // along with Polkadot. If not, see . // The validator index that represent the node that is under test. -pub(crate) const NODE_UNDER_TEST: u32 = 0; +pub const NODE_UNDER_TEST: u32 = 0; -pub(crate) mod configuration; +pub mod approval; +pub mod availability; +pub mod configuration; pub(crate) mod display; pub(crate) mod environment; pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; +pub mod usage; diff --git a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/av_store.rs rename to polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 0a7725c91e0..41c4fe2cbad 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -16,7 +16,7 @@ //! A generic av store subsystem mockup suitable to be used in benchmarks. -use crate::core::network::{HandleNetworkMessage, NetworkMessage}; +use crate::network::{HandleNetworkMessage, NetworkMessage}; use futures::{channel::oneshot, FutureExt}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ diff --git a/polkadot/node/subsystem-bench/src/core/mock/chain_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/chain_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/chain_api.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs similarity index 100% rename from polkadot/node/subsystem-bench/src/core/mock/dummy.rs rename to polkadot/node/subsystem-bench/src/lib/mock/dummy.rs diff --git a/polkadot/node/subsystem-bench/src/core/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs similarity index 97% rename from polkadot/node/subsystem-bench/src/core/mock/mod.rs rename to polkadot/node/subsystem-bench/src/lib/mock/mod.rs index 46fdeb196c0..6dda9a47d39 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -34,9 +34,10 @@ impl HeadSupportsParachains for AlwaysSupportsParachains { } // An orchestra with dummy subsystems +#[macro_export] macro_rules! dummy_builder { ($spawn_task_handle: ident, $metrics: ident) => {{ - use $crate::core::mock::dummy::*; + use $crate::mock::dummy::*; // Initialize a mock overseer. // All subsystem except approval_voting and approval_distribution are mock subsystems. @@ -72,7 +73,6 @@ macro_rules! dummy_builder { .spawner(SpawnGlue($spawn_task_handle)) }}; } -pub(crate) use dummy_builder; #[derive(Clone)] pub struct TestSyncOracle {} diff --git a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs rename to polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index 4682c7ec79a..d598f6447d3 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -17,7 +17,7 @@ //! 
Mocked `network-bridge` subsystems that uses a `NetworkInterface` to access //! the emulated network. -use crate::core::{ +use crate::{ configuration::TestAuthorities, network::{NetworkEmulatorHandle, NetworkInterfaceReceiver, NetworkMessage, RequestExt}, }; diff --git a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs rename to polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 0dd76efcbaf..53faf562f03 100644 --- a/polkadot/node/subsystem-bench/src/core/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -16,7 +16,7 @@ //! A generic runtime api subsystem mockup suitable to be used in benchmarks. -use crate::core::configuration::{TestAuthorities, TestConfiguration}; +use crate::configuration::{TestAuthorities, TestConfiguration}; use bitvec::prelude::BitVec; use futures::FutureExt; use itertools::Itertools; diff --git a/polkadot/node/subsystem-bench/src/core/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs similarity index 99% rename from polkadot/node/subsystem-bench/src/core/network.rs rename to polkadot/node/subsystem-bench/src/lib/network.rs index e9124726d7c..1e09441792d 100644 --- a/polkadot/node/subsystem-bench/src/core/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -33,7 +33,7 @@ // | // Subsystems under test -use crate::core::{ +use crate::{ configuration::{random_latency, TestAuthorities, TestConfiguration}, environment::TestEnvironmentDependencies, NODE_UNDER_TEST, diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs new file mode 100644 index 00000000000..b83ef7d98d9 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Test usage implementation + +use colored::Colorize; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[derive(Debug, Serialize, Deserialize)] +pub struct BenchmarkUsage { + pub benchmark_name: String, + pub network_usage: Vec, + pub cpu_usage: Vec, +} + +impl std::fmt::Display for BenchmarkUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "\n{}\n\n{}\n{}\n\n{}\n{}\n", + self.benchmark_name.purple(), + format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + self.network_usage + .iter() + .map(|v| v.to_string()) + .collect::>() + .join("\n"), + format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), + self.cpu_usage.iter().map(|v| v.to_string()).collect::>().join("\n") + ) + } +} + +impl BenchmarkUsage { + pub fn average(usages: &[Self]) -> Self { + let all_network_usages: Vec<&ResourceUsage> = + usages.iter().flat_map(|v| &v.network_usage).collect(); + let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect(); + + Self { + benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(), + network_usage: ResourceUsage::average_by_resource_name(&all_network_usages), + cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage), + } + } + + pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.network_usage, checks) + } + + pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { + check_usage(&self.benchmark_name, &self.cpu_usage, checks) + } + + pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option { + let self_res = self.cpu_usage.iter().find(|v| v.resource_name == resource_name); + let other_res = other.cpu_usage.iter().find(|v| v.resource_name == resource_name); + + match (self_res, other_res) { + (Some(self_res), Some(other_res)) => Some(self_res.diff(other_res)), + _ => None, + } + } +} + +fn check_usage( + benchmark_name: &str, + usage: &[ResourceUsage], + checks: &[ResourceUsageCheck], +) -> Vec { + checks + .iter() + .filter_map(|check| { + check_resource_usage(usage, check) + .map(|message| format!("{}: {}", benchmark_name, message)) + }) + .collect() +} + +fn check_resource_usage( + usage: &[ResourceUsage], + (resource_name, base, precision): &ResourceUsageCheck, +) -> Option { + if let Some(usage) = usage.iter().find(|v| v.resource_name == *resource_name) { + let diff = (base - usage.per_block).abs() / base; + if diff < *precision { + None + } else { + Some(format!( + "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {}", + resource_name, base, precision, usage.per_block + )) + } + } else { + Some(format!("The resource `{}` is not found", resource_name)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ResourceUsage { + pub resource_name: String, + pub total: f64, + pub per_block: f64, +} + +impl std::fmt::Display for ResourceUsage { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{:<32}{:>12.3}{:>12.3}", self.resource_name.cyan(), self.total, self.per_block) + } +} + +impl ResourceUsage { + fn average_by_resource_name(usages: &[&Self]) -> Vec { + let mut by_name: HashMap> = Default::default(); + for usage in usages { + by_name.entry(usage.resource_name.clone()).or_default().push(usage); + } + let mut average = vec![]; + for (resource_name, values) in by_name { + let total = values.iter().map(|v| v.total).sum::() / 
values.len() as f64; + let per_block = values.iter().map(|v| v.per_block).sum::() / values.len() as f64; + average.push(Self { resource_name, total, per_block }); + } + average + } + + fn diff(&self, other: &Self) -> f64 { + (self.per_block - other.per_block).abs() / self.per_block + } +} + +type ResourceUsageCheck<'a> = (&'a str, f64, f64); -- GitLab From 62b78a16157134a1c38e0081eb60a1c03ac15df3 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Fri, 1 Mar 2024 20:25:24 +0200 Subject: [PATCH 32/86] provisioner: allow multiple cores assigned to the same para (#3233) https://github.com/paritytech/polkadot-sdk/issues/3130 builds on top of https://github.com/paritytech/polkadot-sdk/pull/3160 Processes the availability cores and builds a record of how many candidates it should request from prospective-parachains and their predecessors. Tries to supply as many candidates as the runtime can back. Note that the runtime changes to back multiple candidates per para are not yet done, but this paves the way for it. The following backing/inclusion policy is assumed: 1. the runtime will never back candidates of the same para which don't form a chain with the already backed candidates. Even if the others are still pending availability. We're optimistic that they won't time out and we don't want to back parachain forks (as the complexity would be huge). 2. if a candidate is timed out of the core before being included, all of its successors occupying a core will be evicted. 3. only the candidates which are made available and form a chain starting from the on-chain para head may be included/enacted and cleared from the cores. In other words, if para head is at A and the cores are occupied by B->C->D, and B and D are made available, only B will be included and its core cleared. C and D will remain on the cores awaiting for C to be made available or timed out. As point (2) above already says, if C is timed out, D will also be dropped. 4. The runtime will deduplicate candidates which form a cycle. For example if the provisioner supplies candidates A->B->A, the runtime will only back A (as the state output will be the same) Note that if a candidate is timed out, we don't guarantee that in the next relay chain block the block author will be able to fill all of the timed out cores of the para. That increases complexity by a lot. Instead, the provisioner will supply N candidates where N is the number of candidates timed out, but doesn't include their successors which will be also deleted by the runtime. This'll be backfilled in the next relay chain block. Adjacent changes: - Also fixes: https://github.com/paritytech/polkadot-sdk/issues/3141 - For non prospective-parachains, don't supply multiple candidates per para (we can't have elastic scaling without prospective parachains enabled). paras_inherent should already sanitise this input but it's more efficient this way. Note: all of these changes are backwards-compatible with the non-elastic-scaling scenario (one core per para). 
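To make the selection policy above concrete, here is a minimal, hypothetical sketch of the idea: first consume the ancestors (candidates already occupying cores) along a single path of the fragment tree, then supply fresh candidates from that point up to the requested count. This is not the `FragmentTree` code added below; the types (`Hash` as `u64`, the `children` map), the greedy second phase and the missing max-depth bound are simplifications for illustration only.

```
use std::collections::{HashMap, HashSet};

// Toy candidate identifier; the real code uses `CandidateHash`.
type Hash = u64;

/// Sketch: consume `ancestors` along a single path of the tree, then keep supplying
/// fresh candidates from that point, up to `count`.
fn find_backable_chain(
    // Children per parent; `None` stands for the tree root (the on-chain para head).
    children: &HashMap<Option<Hash>, Vec<Hash>>,
    mut ancestors: HashSet<Hash>,
    count: usize,
    is_backed_on_chain: impl Fn(&Hash) -> bool,
) -> Vec<Hash> {
    let mut cursor: Option<Hash> = None;

    // 1. Walk down the tree while the next node is one of the ancestors. A hole in the
    //    chain (e.g. a timed out candidate) stops the walk early, so its successors get
    //    re-proposed. Two ancestors with the same parent would mean a backed parachain
    //    fork, which the runtime should never allow, so bail out.
    loop {
        let next: Vec<Hash> = children
            .get(&cursor)
            .into_iter()
            .flatten()
            .copied()
            .filter(|c| ancestors.contains(c))
            .collect();
        match next.as_slice() {
            [] => break,
            [one] => {
                ancestors.remove(one);
                cursor = Some(*one);
            },
            _ => return vec![],
        }
    }

    // 2. Greedily extend the chain with candidates that are not yet backed on chain.
    //    Cycles may produce repeated candidates; as per point (4) above, deduplication
    //    is left to the runtime.
    let mut out = Vec::new();
    while out.len() < count {
        let Some(next) = children
            .get(&cursor)
            .into_iter()
            .flatten()
            .copied()
            .find(|c| !is_backed_on_chain(c))
        else {
            break
        };
        out.push(next);
        cursor = Some(next);
    }
    out
}

fn main() {
    // Para head -> 1 -> 2 -> 3 -> 4, with candidates 1 and 2 pending availability on cores.
    let children: HashMap<Option<Hash>, Vec<Hash>> =
        [(None, vec![1]), (Some(1), vec![2]), (Some(2), vec![3]), (Some(3), vec![4])]
            .into_iter()
            .collect();
    let ancestors: HashSet<Hash> = [1, 2].into_iter().collect();
    // The next two backable candidates to supply are 3 and 4.
    assert_eq!(find_backable_chain(&children, ancestors, 2, |_| false), vec![3, 4]);
}
```

In the toy example, with candidates 1 and 2 occupying cores, the next candidates supplied are 3 and 4. The actual implementation below additionally searches across forks to return the longest chain it can find and bounds the traversal by the tree's maximum depth.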
--- Cargo.lock | 3 + .../core/prospective-parachains/Cargo.toml | 1 + .../src/fragment_tree.rs | 570 ++++++++++++++-- .../core/prospective-parachains/src/lib.rs | 19 +- .../core/prospective-parachains/src/tests.rs | 497 +++++--------- polkadot/node/core/provisioner/Cargo.toml | 2 + polkadot/node/core/provisioner/src/error.rs | 12 +- polkadot/node/core/provisioner/src/lib.rs | 234 ++++--- polkadot/node/core/provisioner/src/tests.rs | 629 +++++++++++++++--- polkadot/node/subsystem-types/src/messages.rs | 14 +- prdoc/pr_3233.prdoc | 12 + 11 files changed, 1412 insertions(+), 581 deletions(-) create mode 100644 prdoc/pr_3233.prdoc diff --git a/Cargo.lock b/Cargo.lock index b12006bbcf7..c42ec612e6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12756,6 +12756,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -12779,6 +12780,8 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", + "schnellru", "sp-application-crypto", "sp-keystore", "thiserror", diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5b62d90c1d4..f66a66e859e 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -23,6 +23,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] +rstest = "0.18.2" assert_matches = "1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index 04ee42a9de0..8061dc82d83 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,6 +96,7 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; @@ -756,45 +757,46 @@ impl FragmentTree { depths.iter_ones().collect() } - /// Select `count` candidates after the given `required_path` which pass + /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. /// - /// Does an exhaustive search into the tree starting after `required_path`. - /// If there are multiple possibilities of size `count`, this will select the first one. - /// If there is no chain of size `count` that matches the criteria, this will return the largest - /// chain it could find with the criteria. - /// If there are no candidates meeting those criteria, returns an empty `Vec`. - /// Cycles are accepted, see module docs for the `Cycles` section. + /// Does an exhaustive search into the tree after traversing the ancestors path. + /// If the ancestors draw out a path that can be traversed in multiple ways, no + /// candidates will be returned. + /// If the ancestors do not draw out a full path (the path contains holes), candidates will be + /// suggested that may fill these holes. + /// If the ancestors don't draw out a valid path, no candidates will be returned. 
If there are + /// multiple possibilities of the same size, this will select the first one. If there is no + /// chain of size `count` that matches the criteria, this will return the largest chain it could + /// find with the criteria. If there are no candidates meeting those criteria, returns an empty + /// `Vec`. + /// Cycles are accepted, but this code expects that the runtime will deduplicate + /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will + /// be backed on chain). /// - /// The intention of the `required_path` is to allow queries on the basis of + /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming - /// available and opening up more room on the core. - pub(crate) fn select_children( + /// available or candidates timing out. + pub(crate) fn find_backable_chain( &self, - required_path: &[CandidateHash], + ancestors: Ancestors, count: u32, pred: impl Fn(&CandidateHash) -> bool, ) -> Vec { - let base_node = { - // traverse the required path. - let mut node = NodePointer::Root; - for required_step in required_path { - if let Some(next_node) = self.node_candidate_child(node, &required_step) { - node = next_node; - } else { - return vec![] - }; - } - - node - }; - - // TODO: taking the first best selection might introduce bias - // or become gameable. - // - // For plausibly unique parachains, this shouldn't matter much. - // figure out alternative selection criteria? - self.select_children_inner(base_node, count, count, &pred, &mut vec![]) + if count == 0 { + return vec![] + } + // First, we need to order the ancestors. + // The node returned is the one from which we can start finding new backable candidates. + let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; + + self.find_backable_chain_inner( + base_node, + count, + count, + &pred, + &mut Vec::with_capacity(count as usize), + ) } // Try finding a candidate chain starting from `base_node` of length `expected_count`. @@ -805,7 +807,7 @@ impl FragmentTree { // Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum // depth we'll reach is `expected_count`. // - // Worst case performance is `O(num_forks ^ expected_count)`. + // Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree. // Although an exponential function, this is actually a constant that can only be altered via // sudo/governance, because: // 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core` @@ -817,7 +819,7 @@ impl FragmentTree { // scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a // small number (1-3), capped by the total number of available cores (a constant alterable // only via governance/sudo). - fn select_children_inner( + fn find_backable_chain_inner( &self, base_node: NodePointer, expected_count: u32, @@ -857,7 +859,7 @@ impl FragmentTree { for (child_ptr, child_hash) in children { accumulator.push(child_hash); - let result = self.select_children_inner( + let result = self.find_backable_chain_inner( child_ptr, expected_count, remaining_count - 1, @@ -869,6 +871,9 @@ impl FragmentTree { // Short-circuit the search if we've found the right length. Otherwise, we'll // search for a max. 
+ // Taking the first best selection doesn't introduce bias or become gameable, + // because `find_ancestor_path` uses a `HashSet` to track the ancestors, which + // makes the order in which ancestors are visited non-deterministic. if result.len() == expected_count as usize { return result } else if best_result.len() < result.len() { @@ -879,6 +884,93 @@ impl FragmentTree { best_result } + // Orders the ancestors into a viable path from root to the last one. + // Returns a pointer to the last node in the path. + // We assume that the ancestors form a chain (that the + // av-cores do not back parachain forks), None is returned otherwise. + // If we cannot use all ancestors, stop at the first found hole in the chain. This usually + // translates to a timed out candidate. + fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option { + // The number of elements in the path we've processed so far. + let mut depth = 0; + let mut last_node = NodePointer::Root; + let mut next_node: Option = Some(NodePointer::Root); + + while let Some(node) = next_node { + if depth > self.scope.max_depth { + return None; + } + + last_node = node; + + next_node = match node { + NodePointer::Root => { + let children = self + .nodes + .iter() + .enumerate() + .take_while(|n| n.1.parent == NodePointer::Root) + .map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash)) + .collect::>(); + + self.find_valid_child(&mut ancestors, children.iter()).ok()? + }, + NodePointer::Storage(ptr) => { + let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter())); + if let Some(children) = children { + self.find_valid_child(&mut ancestors, children).ok()? + } else { + None + } + }, + }; + + depth += 1; + } + + Some(last_node) + } + + // Find a node from the given iterator which is present in the ancestors + // collection. If there are multiple such nodes, return an error and log a warning. We don't + // accept forks in a parachain to be backed. The supplied ancestors should all form a chain. + // If there is no such node, return None. + fn find_valid_child<'a>( + &self, + ancestors: &'a mut Ancestors, + nodes: impl Iterator + 'a, + ) -> Result, ()> { + let mut possible_children = + nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) { + true => Some(node_ptr), + false => None, + }); + + // We don't accept forks in a parachain to be backed. The supplied ancestors + // should all form a chain. + let next = possible_children.next(); + if let Some(second_child) = possible_children.next() { + if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) = + (next, second_child) + { + gum::error!( + target: LOG_TARGET, + para_id = ?self.scope.para, + relay_parent = ?self.scope.relay_parent, + "Trying to find new backable candidates for a parachain for which we've backed a fork.\ + This is a bug and the runtime should not have allowed it.\n\ + Backed candidates with the same parent: {}, {}", + self.nodes[*first_child].candidate_hash, + self.nodes[*second_child].candidate_hash, + ); + } + + Err(()) + } else { + Ok(next.copied()) + } + } + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { // Populate the tree breadth-first. 
let mut last_sweep_start = None; @@ -1061,8 +1153,18 @@ mod tests { use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; + use rstest::rstest; use std::iter; + impl NodePointer { + fn unwrap_idx(self) -> usize { + match self { + NodePointer::Root => panic!("Unexpected root"), + NodePointer::Storage(index) => index, + } + } + } + fn make_constraints( min_relay_parent_number: BlockNumber, valid_watermarks: Vec, @@ -1546,6 +1648,373 @@ mod tests { assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); } + #[test] + fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; + + // Empty tree + let storage = CandidateStorage::new(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + assert_eq!(tree.candidates().collect::>().len(), 0); + assert_eq!(tree.nodes.len(), 0); + + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + // Invalid candidate. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); + assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); + } + + #[rstest] + #[case(true, 13)] + #[case(false, 8)] + // The tree with no cycles looks like: + // Make a tree that looks like this (note that there's no cycle): + // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4 + // + // The tree with cycles is the same as the first but has a cycle from 4 back to the state + // produced by 0 (It's bounded by the max_depth + 1). 
+ // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4---+ + // | | + // 1 5 + // | + // 2 + // | + // 3 + fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, + ) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. + 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = + candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. 
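+			// Candidates 0 and 7 both build directly on the required parent, so supplying
+			// them together as ancestors describes a fork: `find_ancestor_path` returns
+			// `None` and no backable chain is produced.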
+ let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. + let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. 
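+			// With `max_depth` of 7, the longest path stored through the cycle is
+			// 0-1-2-3-4-1-2-3 (max_depth + 1 nodes), so after consuming the five supplied
+			// ancestors only 1-2-3 remains as a backable continuation.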
+ let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } + } + #[test] fn graceful_cycle_of_0() { let mut storage = CandidateStorage::new(); @@ -1602,13 +2071,17 @@ mod tests { for count in 1..10 { assert_eq!( - tree.select_children(&[], count, |_| true), + tree.find_backable_chain(Ancestors::new(), count, |_| true), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize, max_depth + 1)) .collect::>() ); assert_eq!( - tree.select_children(&[candidate_a_hash], count - 1, |_| true), + tree.find_backable_chain( + [candidate_a_hash].into_iter().collect(), + count - 1, + |_| true + ), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize - 1, max_depth)) .collect::>() @@ -1682,22 +2155,22 @@ mod tests { assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - assert_eq!(tree.select_children(&[], 1, |_| true), vec![candidate_a_hash],); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); assert_eq!( - tree.select_children(&[], 2, |_| true), + tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![candidate_a_hash, candidate_b_hash], ); assert_eq!( - tree.select_children(&[], 3, |_| true), + tree.find_backable_chain(Ancestors::new(), 3, |_| true), vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], ); assert_eq!( - tree.select_children(&[candidate_a_hash], 2, |_| true), + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), vec![candidate_b_hash, candidate_a_hash], 
); assert_eq!( - tree.select_children(&[], 6, |_| true), + tree.find_backable_chain(Ancestors::new(), 6, |_| true), vec![ candidate_a_hash, candidate_b_hash, @@ -1706,10 +2179,17 @@ mod tests { candidate_a_hash ], ); - assert_eq!( - tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], - ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } } #[test] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 5937a1c1fb9..2b14e09b4fb 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -35,7 +35,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, + Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -150,16 +150,9 @@ async fn run_iteration( relay_parent, para, count, - required_path, + ancestors, tx, - ) => answer_get_backable_candidates( - &view, - relay_parent, - para, - count, - required_path, - tx, - ), + ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => @@ -565,7 +558,7 @@ fn answer_get_backable_candidates( relay_parent: Hash, para: ParaId, count: u32, - required_path: Vec, + ancestors: Ancestors, tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { @@ -614,7 +607,7 @@ fn answer_get_backable_candidates( }; let backable_candidates: Vec<_> = tree - .select_children(&required_path, count, |candidate| storage.is_backed(candidate)) + .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( @@ -635,7 +628,7 @@ fn answer_get_backable_candidates( if backable_candidates.is_empty() { gum::trace!( target: LOG_TARGET, - ?required_path, + ?ancestors, para_id = ?para, %relay_parent, "Could not find any backable candidate", diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 732736b101d..0beddbf1416 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -407,7 +407,7 @@ async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, count: u32, expected_result: Vec<(CandidateHash, Hash)>, ) { @@ -415,11 +415,7 @@ async fn get_backable_candidates( virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::GetBackableCandidates( - leaf.hash, - para_id, - count, - required_path, - tx, + leaf.hash, para_id, count, ancestors, tx, ), }) .await; @@ -903,7 +899,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - 
vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -912,12 +908,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), 0, vec![], ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 1.into(), vec![], 0, vec![]).await; // Second candidates. second_candidate(&mut virtual_overseer, candidate_a.clone()).await; @@ -928,7 +932,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -939,12 +943,20 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]).await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -955,7 +967,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -964,20 +976,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) .await; - // Should not get anything at the wrong path. + // Wrong path get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; @@ -1075,15 +1087,29 @@ fn check_backable_query_multiple_candidates() { make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); // Should not get any backable candidates for the other para. 
- get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -1097,7 +1123,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -1106,7 +1132,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 4, vec![ (candidate_hash_a, leaf_a.hash), @@ -1124,7 +1150,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) @@ -1133,16 +1159,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 3, vec![ (candidate_hash_b, leaf_a.hash), @@ -1159,7 +1176,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), count, vec![ (candidate_hash_c, leaf_a.hash), @@ -1172,26 +1189,30 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 and higher { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] + .into_iter() + .collect(), 1, - vec![(candidate_hash_d, leaf_a.hash)], + vec![(candidate_hash_j, leaf_a.hash)], ) .await; + get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), 1, - vec![(candidate_hash_h, leaf_a.hash)], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest // chain we can get. for count in 4..10 { @@ -1199,7 +1220,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), count, vec![ (candidate_hash_h, leaf_a.hash), @@ -1213,317 +1234,127 @@ fn check_backable_query_multiple_candidates() { // No more candidates in any chain. 
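+		// Supplying a complete chain as ancestors leaves nothing to build on top of, so an
+		// empty response is returned for every requested count.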
{ - let required_paths = vec![ - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } + for count in 1..4 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] + .into_iter() + .collect(), + count, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![ + candidate_hash_a, + candidate_hash_c, + candidate_hash_h, + candidate_hash_i, + candidate_hash_j, + ] + .into_iter() + .collect(), + count, + vec![], + ) + .await; } } - // Should not get anything at the wrong path. + // Wrong paths. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], + vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), 3, - vec![], + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_c], - 3, - vec![], + vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), + 4, + vec![ + (candidate_hash_c, leaf_a.hash), + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], ) .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } - - // A tree with multiple roots. - // Parachain 1 looks like this: - // (imaginary root) - // | | - // +----B---+ A - // | | | | - // | | | C - // D E F | - // | H - // G | - // I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + 2, + vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + ) + .await; - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], + ) + .await; - // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; + // Parachain fork. get_backable_candidates( &mut virtual_overseer, &leaf_a, - 2.into(), - vec![candidate_hash_a], + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, vec![], ) .await; - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 1, - vec![(candidate_hash_c, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_h, leaf_a.hash)], - ) - .await; - - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 2..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - count, - vec![(candidate_hash_f, leaf_a.hash), (candidate_hash_g, leaf_a.hash)], - ) - .await; - } - } - - // required path of 2 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b, candidate_hash_f], - 1, - vec![(candidate_hash_g, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } - - // No more candidates in any chain. - { - let required_paths = vec![ - vec![candidate_hash_b, candidate_hash_f, candidate_hash_g], - vec![candidate_hash_b, candidate_hash_e], - vec![candidate_hash_b, candidate_hash_d], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } - } - } + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; - // Should not get anything at the wrong path. + // Requested count is zero. 
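+		// A requested count of zero always yields an empty response, regardless of the
+		// ancestors supplied.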
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_d], - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1531,8 +1362,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], - 3, + vec![candidate_hash_a].into_iter().collect(), + 0, vec![], ) .await; @@ -1540,8 +1371,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d], - 3, + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, vec![], ) .await; @@ -1853,8 +1684,8 @@ fn check_pvd_query() { assert_eq!(view.candidate_storage.len(), 2); } -// Test simultaneously activating and deactivating leaves, and simultaneously deactivating multiple -// leaves. +// Test simultaneously activating and deactivating leaves, and simultaneously deactivating +// multiple leaves. #[test] fn correctly_updates_leaves() { let test_state = TestState::default(); @@ -2048,7 +1879,7 @@ fn persists_pending_availability_candidate() { &mut virtual_overseer, &leaf_b, para_id, - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_b_hash)], ) @@ -2113,7 +1944,7 @@ fn backwards_compatible() { &mut virtual_overseer, &leaf_a, para_id, - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, candidate_relay_parent)], ) @@ -2135,7 +1966,15 @@ fn backwards_compatible() { ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_b, para_id, vec![], 1, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::new(), + 1, + vec![], + ) + .await; virtual_overseer }); @@ -2162,13 +2001,13 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len + })).unwrap(); } + ); assert_matches!( virtual_overseer.recv().await, diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 24cdfd6b57b..2a09e2b5b2c 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -20,9 +20,11 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/error.rs b/polkadot/node/core/provisioner/src/error.rs index 376d69f276f..aae3234c3cc 100644 --- a/polkadot/node/core/provisioner/src/error.rs +++ b/polkadot/node/core/provisioner/src/error.rs @@ -44,14 +44,17 @@ pub enum Error { 
#[error("failed to get block number")] CanceledBlockNumber(#[source] oneshot::Canceled), + #[error("failed to get session index")] + CanceledSessionIndex(#[source] oneshot::Canceled), + #[error("failed to get backed candidates")] CanceledBackedCandidates(#[source] oneshot::Canceled), #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), - #[error("failed to get backable candidate from prospective parachains")] - CanceledBackableCandidate(#[source] oneshot::Canceled), + #[error("failed to get backable candidates from prospective parachains")] + CanceledBackableCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), @@ -71,11 +74,6 @@ pub enum Error { #[error("failed to send return message with Inherents")] InherentDataReturnChannel, - #[error( - "backed candidate does not correspond to selected candidate; check logic in provisioner" - )] - BackedCandidateOrderingProblem, - #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index a29cf72afb1..c9ed873d3c2 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -24,26 +24,29 @@ use futures::{ channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered, FutureExt, }; use futures_timer::Delay; +use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, + Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, + ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ has_required_runtime, request_availability_cores, request_persisted_validation_data, - runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + request_session_index_for_child, + runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode}, TimeoutExt, }; use polkadot_primitives::{ - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, - OccupiedCoreAssumption, SignedAvailabilityBitfield, ValidatorIndex, + vstaging::{node_features::FeatureIndex, NodeFeatures}, + BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, + Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; mod disputes; mod error; @@ -77,11 +80,18 @@ impl ProvisionerSubsystem { } } +/// Per-session info we need for the provisioner subsystem. +pub struct PerSession { + prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, +} + /// A per-relay-parent state for the provisioning subsystem. 
pub struct PerRelayParent { leaf: ActivatedLeaf, backed_candidates: Vec, prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, @@ -89,13 +99,14 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { + fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, backed_candidates: Vec::new(), - prospective_parachains_mode, + prospective_parachains_mode: per_session.prospective_parachains_mode, + elastic_scaling_mvp: per_session.elastic_scaling_mvp, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -124,10 +135,17 @@ impl ProvisionerSubsystem { async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut inherent_delays = InherentDelays::new(); let mut per_relay_parent = HashMap::new(); + let mut per_session = LruMap::new(ByLength::new(2)); loop { - let result = - run_iteration(&mut ctx, &mut per_relay_parent, &mut inherent_delays, &metrics).await; + let result = run_iteration( + &mut ctx, + &mut per_relay_parent, + &mut per_session, + &mut inherent_delays, + &metrics, + ) + .await; match result { Ok(()) => break, @@ -142,6 +160,7 @@ async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { async fn run_iteration( ctx: &mut Context, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, metrics: &Metrics, ) -> Result<(), Error> { @@ -151,7 +170,7 @@ async fn run_iteration( // Map the error to ensure that the subsystem exits when the overseer is gone. match from_overseer.map_err(Error::OverseerExited)? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, per_session, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -183,6 +202,7 @@ async fn handle_active_leaves_update( sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, ) -> Result<(), Error> { gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); @@ -191,10 +211,31 @@ async fn handle_active_leaves_update( } if let Some(leaf) = update.activated { + let session_index = request_session_index_for_child(leaf.hash, sender) + .await + .await + .map_err(Error::CanceledSessionIndex)??; + if per_session.get(&session_index).is_none() { + let prospective_parachains_mode = + prospective_parachains_mode(sender, leaf.hash).await?; + let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + per_session.insert( + session_index, + PerSession { prospective_parachains_mode, elastic_scaling_mvp }, + ); + } + + let session_info = per_session.get(&session_index).expect("Just inserted"); + gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); - let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, session_info)); inherent_delays.push(delay_fut); } @@ -253,6 +294,7 @@ async fn send_inherent_data_bg( let signed_bitfields = per_relay_parent.signed_bitfields.clone(); let backed_candidates = per_relay_parent.backed_candidates.clone(); let mode = per_relay_parent.prospective_parachains_mode; + let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -272,6 +314,7 @@ async fn send_inherent_data_bg( &signed_bitfields, &backed_candidates, mode, + elastic_scaling_mvp, return_senders, &mut sender, &metrics, @@ -383,6 +426,7 @@ async fn send_inherent_data( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -434,6 +478,7 @@ async fn send_inherent_data( &bitfields, candidates, prospective_parachains_mode, + elastic_scaling_mvp, leaf.hash, from_job, ) @@ -558,6 +603,8 @@ async fn select_candidate_hashes_from_tracked( let mut selected_candidates = Vec::with_capacity(candidates.len().min(availability_cores.len())); + let mut selected_parachains = + HashSet::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -591,6 +638,12 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; + if selected_parachains.contains(&scheduled_core.para_id) { + // We already picked a candidate for this parachain. Elastic scaling only works with + // prospective parachains mode. + continue + } + let validation_data = match request_persisted_validation_data( relay_parent, scheduled_core.para_id, @@ -624,6 +677,7 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); + selected_parachains.insert(candidate.descriptor.para_id); selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -637,70 +691,93 @@ async fn select_candidate_hashes_from_tracked( /// Should be called when prospective parachains are enabled. async fn request_backable_candidates( availability_cores: &[CoreState], + elastic_scaling_mvp: bool, bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; - let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + // Record how many cores are scheduled for each paraid. Use a BTreeMap because + // we'll need to iterate through them. + let mut scheduled_cores: BTreeMap = BTreeMap::new(); + // The on-chain ancestors of a para present in availability-cores. 
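+	// These are passed on to prospective-parachains so that the returned chain resumes
+	// right after the candidates that are still pending availability on-chain.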
+ let mut ancestors: HashMap = + HashMap::with_capacity(availability_cores.len()); for (core_idx, core) in availability_cores.iter().enumerate() { - let (para_id, required_path) = match core { + let core_idx = CoreIndex(core_idx as u32); + match core { CoreState::Scheduled(scheduled_core) => { - // The core is free, pick the first eligible candidate from - // the fragment tree. - (scheduled_core.para_id, Vec::new()) + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { + let is_available = bitfields_indicate_availability( + core_idx.0 as usize, + bitfields, + &occupied_core.availability, + ); + + if is_available { + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { - // The candidate occupying the core is available, choose its - // child in the fragment tree. - // - // TODO: doesn't work for on-demand parachains. We lean hard on the - // assumption that cores are fixed to specific parachains within a session. - // https://github.com/paritytech/polkadot/issues/5492 - (scheduled_core.para_id, vec![occupied_core.candidate_hash]) - } else { - continue - } - } else { - if occupied_core.time_out_at != block_number { - continue + // Request a new backable candidate for the newly scheduled para id. + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else if occupied_core.time_out_at <= block_number { + // Timed out before being available. + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - (scheduled_core.para_id, Vec::new()) - } else { - continue + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else { + // Not timed out and not available. + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); } }, CoreState::Free => continue, }; + } - // We should be calling this once per para rather than per core. - // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. - // For now, at least make sure we don't supply the same candidate multiple times in case a - // para has multiple cores scheduled. - let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => { - if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { - selected_candidates.push((hash, relay_parent)) - } - }, - None => { - gum::debug!( - target: LOG_TARGET, - leaf_hash = ?relay_parent, - core = core_idx, - "No backable candidate returned by prospective parachains", - ); - }, + let mut selected_candidates: Vec<(CandidateHash, Hash)> = + Vec::with_capacity(availability_cores.len()); + + for (para_id, core_count) in scheduled_cores { + let para_ancestors = ancestors.remove(¶_id).unwrap_or_default(); + + // If elastic scaling MVP is disabled, only allow one candidate per parachain. 
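+		// A para can only be assigned more than one core through elastic scaling, so when
+		// the feature is disabled we don't provision any candidates for such paras.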
+ if !elastic_scaling_mvp && core_count > 1 { + continue } + + let response = get_backable_candidates( + relay_parent, + para_id, + para_ancestors, + core_count as u32, + sender, + ) + .await?; + + if response.is_empty() { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + ?para_id, + "No backable candidate returned by prospective parachains", + ); + continue + } + + selected_candidates.extend(response.into_iter().take(core_count)); } Ok(selected_candidates) @@ -713,6 +790,7 @@ async fn select_candidates( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { @@ -722,7 +800,14 @@ async fn select_candidates( let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => - request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + request_backable_candidates( + availability_cores, + elastic_scaling_mvp, + bitfields, + relay_parent, + sender, + ) + .await?, ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( availability_cores, @@ -745,24 +830,6 @@ async fn select_candidates( gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and - // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. - // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected - // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by - // checking them in order, we can ensure that the backed candidates are also in order. - let mut backed_idx = 0; - for selected in selected_candidates { - if selected.0 == - candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() - { - backed_idx += 1; - } - } - if candidates.len() != backed_idx { - Err(Error::BackedCandidateOrderingProblem)?; - } - // keep only one candidate with validation code. let mut with_validation_code = false; candidates.retain(|c| { @@ -804,28 +871,27 @@ async fn get_block_number_under_construction( } } -/// Requests backable candidate from Prospective Parachains based on -/// the given path in the fragment tree. -async fn get_backable_candidate( +/// Requests backable candidates from Prospective Parachains based on +/// the given ancestors in the fragment tree. The ancestors may not be ordered. +async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, + count: u32, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para_id, - 1, // core count hardcoded to 1, until elastic scaling is implemented and enabled. 
- required_path, + count, + ancestors, tx, )) .await; - rx.await - .map_err(Error::CanceledBackableCandidate) - .map(|res| res.get(0).copied()) + rx.await.map_err(Error::CanceledBackableCandidates) } /// The availability bitfield for a given core is the transpose diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 87c0e7a65d3..bdb4f85f400 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -22,6 +22,9 @@ use polkadot_primitives::{OccupiedCore, ScheduledCore}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { + let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); + candidate_descriptor.para_id = para_id.into(); + CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), next_up_on_available: None, @@ -29,7 +32,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + candidate_descriptor, candidate_hash: Default::default(), }) } @@ -254,10 +257,56 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use rstest::rstest; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; - // For test purposes, we always return this set of availability cores: + fn dummy_candidate_template() -> CandidateReceipt { + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + } + } + + fn make_candidates( + core_count: usize, + expected_backed_indices: Vec, + ) -> (Vec, Vec) { + let candidate_template = dummy_candidate_template(); + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(core_count) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + let expected_backed = expected_backed_indices + .iter() + .map(|&idx| candidates[idx].clone()) + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + let candidate_hashes = candidates.into_iter().map(|c| c.hash()).collect(); + + (candidate_hashes, expected_backed) + } + + // For testing only one core assigned to a parachain, we return this set of availability cores: // // [ // 0: Free, @@ -273,7 +322,7 @@ mod select_candidates { // 10: Occupied(both next_up set, not available, timeout), // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] - fn mock_availability_cores() -> Vec { + fn mock_availability_cores_one_per_para() -> Vec { use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -292,6 +341,7 @@ mod select_candidates { build_occupied_core(4, |core| { core.next_up_on_available = Some(scheduled_core(4)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); }), // 5: Occupied(next_up_on_time_out set but not timeout), build_occupied_core(5, |core| { @@ -307,12 +357,14 @@ mod select_candidates { build_occupied_core(7, 
|core| { core.next_up_on_time_out = Some(scheduled_core(7)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); }), // 8: Occupied(both next_up set, available), build_occupied_core(8, |core| { core.next_up_on_available = Some(scheduled_core(8)); core.next_up_on_time_out = Some(scheduled_core(8)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); }), // 9: Occupied(both next_up set, not available, no timeout), build_occupied_core(9, |core| { @@ -324,6 +376,7 @@ mod select_candidates { core.next_up_on_available = Some(scheduled_core(10)); core.next_up_on_time_out = Some(scheduled_core(10)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); }), // 11: Occupied(next_up_on_available and available, but different successor para_id) build_occupied_core(11, |core| { @@ -333,20 +386,189 @@ mod select_candidates { ] } + // For test purposes with multiple possible cores assigned to a para, we always return this set + // of availability cores: + fn mock_availability_cores_multiple_per_para() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); + }), + // 5: Occupied(next_up_on_time_out set but not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + 
core.availability = core.availability.clone().not(); + }), + // 12-14: Occupied(next_up_on_available and available, same para_id). + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(122)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(123)); + }), + // 15: Scheduled on same para_id as 12-14. + Scheduled(scheduled_core(12)), + // 16: Occupied(13, no next_up set, not available) + build_occupied_core(13, |core| { + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(131)); + }), + // 17: Occupied(13, no next_up set, available) + build_occupied_core(13, |core| { + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + }), + // 18: Occupied(13, next_up_on_available set to 13 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(133)); + }), + // 19: Occupied(13, next_up_on_available set to 13 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(134)); + }), + // 20: Occupied(13, next_up_on_time_out set to 13 but not timeout) + build_occupied_core(13, |core| { + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(135)); + }), + // 21: Occupied(13, next_up_on_available set to 14 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(136)); + }), + // 22: Occupied(13, next_up_on_available set to 14 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(137)); + }), + // 23: Occupied(13, both next_up set to 14, available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(138)); + }), + // 24: Occupied(13, both next_up set to 14, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1399)); + }), + // 25: Occupied(13, next_up_on_available and available, but successor para_id 15) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(139)); + }), + // 26: Occupied(15, next_up_on_available and available, but 
successor para_id 13) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(151)); + }), + // 27: Occupied(15, both next_up, both available and timed out) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(152)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 28: Occupied(13, both next_up set to 13, not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1398)); + }), + // 29: Occupied(13, both next_up set to 13, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1397)); + }), + ] + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, - expected: Vec, + mock_availability_cores: Vec, + mut expected: Vec, + mut expected_ancestors: HashMap, Ancestors>, prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut backed_iter = expected.clone().into_iter(); + + expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected .iter() .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); - let mut backed_iter = expected.clone().into_iter(); - while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -356,7 +578,7 @@ mod select_candidates { PersistedValidationDataReq(_para_id, _assumption, tx), )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => - tx.send(Ok(mock_availability_cores())).unwrap(), + tx.send(Ok(mock_availability_cores.clone())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( hashes, sender, @@ -373,35 +595,71 @@ mod select_candidates { let _ = sender.send(response); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, tx), - ) => { - assert_eq!(count, 1); - - match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled { .. } => { - let _ = - tx.send(candidates_iter.next().map_or_else(Vec::new, |c| vec![c])); - }, - ProspectiveParachainsMode::Disabled => - panic!("unexpected prospective parachains request"), - } + ProspectiveParachainsMessage::GetBackableCandidates( + _, + _para_id, + count, + actual_ancestors, + tx, + ), + ) => match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled { .. 
} => { + assert!(count > 0); + let candidates = + (&mut candidates_iter).take(count as usize).collect::>(); + assert_eq!(candidates.len(), count as usize); + + if !expected_ancestors.is_empty() { + if let Some(expected_required_ancestors) = expected_ancestors.remove( + &(candidates + .clone() + .into_iter() + .take(actual_ancestors.len()) + .map(|(c_hash, _)| c_hash) + .collect::>()), + ) { + assert_eq!(expected_required_ancestors, actual_ancestors); + } else { + assert_eq!(actual_ancestors.len(), 0); + } + } + + let _ = tx.send(candidates); + }, + ProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), } } + + if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode { + assert_eq!(candidates_iter.next(), None); + } + assert_eq!(expected_ancestors.len(), 0); } - #[test] - fn can_succeed() { + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) { test_harness( - |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), + |r| { + mock_overseer( + r, + Vec::new(), + Vec::new(), + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) @@ -411,22 +669,22 @@ mod select_candidates { ) } - // this tests that only the appropriate candidates get selected. - // To accomplish this, we supply a candidate list containing one candidate per possible core; - // the candidate selection algorithm must filter them to the appropriate set - #[test] - fn selects_correct_candidates() { - let mock_cores = mock_availability_cores(); - - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; - + // Test candidate selection when prospective parachains mode is disabled. + // This tests that only the appropriate candidates get selected when prospective parachains mode + // is disabled. To accomplish this, we supply a candidate list containing one candidate per + // possible core; the candidate selection algorithm must filter them to the appropriate set + #[rstest] + // why those particular indices? see the comments on mock_availability_cores_*() functions. + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], true)] + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], false)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], true)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], false)] + fn test_in_subsystem_selection( + #[case] mock_cores: Vec, + #[case] expected_candidates: Vec, + #[case] elastic_scaling_mvp: bool, + ) { + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) .take(mock_cores.len()) .enumerate() @@ -453,9 +711,8 @@ mod select_candidates { }) .collect(); - // why those particular indices? 
see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect(); let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates @@ -473,14 +730,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let result = select_candidates( + let result: Vec = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + elastic_scaling_mvp, Default::default(), &mut tx, ) @@ -498,20 +765,24 @@ mod select_candidates { ) } - #[test] - fn selects_max_one_code_upgrade() { - let mock_cores = mock_availability_cores(); + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn selects_max_one_code_upgrade( + #[case] prospective_parachains_mode: ProspectiveParachainsMode, + ) { + let mock_cores = mock_availability_cores_one_per_para(); let empty_hash = PersistedValidationData::::default().hash(); // why those particular indices? see the comments on mock_availability_cores() - // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 4, 7, 8, 10]; + // the first candidate with code is included out of [1, 4, 7, 8, 10, 12]. + let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10]; + let expected_cores = [1, 7, 10, 12]; - let committed_receipts: Vec<_> = (0..mock_cores.len()) + let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); descriptor.para_id = i.into(); @@ -552,23 +823,32 @@ mod select_candidates { let expected_backed_filtered: Vec<_> = expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); - assert_eq!(result.len(), 3); + assert_eq!(result.len(), 4); result.into_iter().for_each(|c| { assert!( @@ -581,66 +861,214 @@ mod select_candidates { ) } - #[test] - fn request_from_prospective_parachains() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); + #[rstest] + #[case(true)] + #[case(false)] + fn request_from_prospective_parachains_one_core_per_para(#[case] elastic_scaling_mvp: bool) { + let mock_cores = mock_availability_cores_one_per_para(); - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + // why those particular 
indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10, 12]; + let (candidates, expected_candidates) = + make_candidates(mock_cores.len() + 1, expected_candidates); - let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) - .enumerate() - .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate - }) - .collect(); + // Expect prospective parachains subsystem requests. + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates_clone, + required_ancestors, + prospective_parachains_mode, + ) + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + elastic_scaling_mvp, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert_eq!(result.len(), expected_candidates.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp() { + let mock_cores = mock_availability_cores_multiple_per_para(); // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + vec![1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15]; // Expect prospective parachains subsystem requests. 
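A para that is assigned several cores at once should be asked for correspondingly many backable candidates (the message docs later in this series phrase this as "N should represent the number of scheduled cores of this ParaId"). The following is a minimal, dependency-free sketch of that counting step; `ParaId` is a `u32` stand-in here, not the real polkadot-primitives type, and the helper is illustrative rather than the provisioner's actual code:

```rust
use std::collections::HashMap;

// Stand-in for the real `ParaId` type from polkadot-primitives.
type ParaId = u32;

/// Count how many backable candidates may be requested per para:
/// one for every core scheduled for it or currently occupied by it.
fn candidates_per_para(scheduled: &[ParaId], occupied: &[ParaId]) -> HashMap<ParaId, u32> {
    let mut counts: HashMap<ParaId, u32> = HashMap::new();
    for para in scheduled.iter().chain(occupied.iter()) {
        *counts.entry(*para).or_insert(0) += 1;
    }
    counts
}

fn main() {
    // Paras 12 and 13 hold several cores at once, as in the multi-core fixtures above.
    let scheduled = [1, 12];
    let occupied = [12, 12, 12, 13, 13, 13];
    let counts = candidates_per_para(&scheduled, &occupied);
    assert_eq!(counts[&12], 4);
    assert_eq!(counts[&13], 3);
    assert_eq!(counts[&1], 1);
}
```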
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; - let expected_backed = expected_candidates - .iter() - .map(|c| { - BackedCandidate::new( - CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - Vec::new(), - default_bitvec(MOCK_GROUP_SIZE), - None, + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + required_ancestors.insert( + [12, 12, 12].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(121)), + CandidateHash(Hash::from_low_u64_be(122)), + CandidateHash(Hash::from_low_u64_be(123)), + ] + .into_iter() + .collect(), + ); + required_ancestors.insert( + [13, 13, 13].iter().map(|&idx| candidates[idx]).collect::>(), + (131..=139) + .map(|num| CandidateHash(Hash::from_low_u64_be(num))) + .chain(std::iter::once(CandidateHash(Hash::from_low_u64_be(1398)))) + .collect(), + ); + + required_ancestors.insert( + [15, 15].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(151)), + CandidateHash(Hash::from_low_u64_be(152)), + ] + .into_iter() + .collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, ) - }) - .collect(); + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + true, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp_disabled() { + let mock_cores = mock_availability_cores_multiple_per_para(); + + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10]; + // Expect prospective parachains subsystem requests. 
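When the elastic-scaling MVP flag passed to `select_candidates` is `false`, the expectation above shrinks back to one candidate per para ([1, 4, 7, 8, 10]) even though several cores are occupied by the same para. A hedged sketch of that cap, assuming a simple per-para core count as input (not the provisioner's actual code):

```rust
/// How many backable candidates to request for a para holding `cores_for_para`
/// cores, depending on whether the elastic-scaling MVP feature is enabled.
fn request_count(cores_for_para: u32, elastic_scaling_mvp: bool) -> u32 {
    if elastic_scaling_mvp {
        // One candidate per assigned core.
        cores_for_para
    } else {
        // Without the MVP feature, at most a single candidate per para is provisioned.
        cores_for_para.min(1)
    }
}

fn main() {
    assert_eq!(request_count(4, true), 4);
    assert_eq!(request_count(4, false), 1);
    assert_eq!(request_count(0, false), 0);
}
```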
+ let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), "Failed to find candidate: {:?}", c, ) @@ -651,18 +1079,11 @@ mod select_candidates { #[test] fn request_receipts_based_on_relay_parent() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + let mock_cores = mock_availability_cores_one_per_para(); + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) + .take(mock_cores.len() + 1) .enumerate() .map(|(idx, mut candidate)| { candidate.descriptor.para_id = idx.into(); @@ -673,7 +1094,7 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + [1, 4, 7, 8, 10, 12].iter().map(|&idx| candidates[idx].clone()).collect(); // Expect prospective parachains subsystem requests. 
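The `required_ancestors` maps in these tests are keyed by the exact group of candidates a request refers to, while the ancestor collection itself is unordered: `Ancestors` is introduced below as a `HashSet` of candidate hashes, so only set equality matters. A small self-contained sketch, with a `u64` stand-in for `CandidateHash`:

```rust
use std::collections::{HashMap, HashSet};

// Stand-in for polkadot's `CandidateHash`; `Ancestors` is a set of them.
type CandidateHash = u64;
type Ancestors = HashSet<CandidateHash>;

fn main() {
    // Expected ancestor sets, keyed by the candidate group a request refers to,
    // in the spirit of the `required_ancestors` maps built in these tests.
    let mut expected: HashMap<Vec<CandidateHash>, Ancestors> = HashMap::new();
    expected.insert(vec![121, 122, 123], [121, 122, 123].into_iter().collect());

    // Ancestors arrive in no particular order; the assertion is on set equality.
    let actual: Ancestors = [123, 121, 122].into_iter().collect();
    assert_eq!(expected.remove(&vec![121, 122, 123]), Some(actual));
}
```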
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; @@ -693,14 +1114,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 549e43a671d..f11291fd0ea 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -1112,6 +1112,9 @@ pub struct ProspectiveValidationDataRequest { /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; +/// A collection of ancestor candidates of a parachain. +pub type Ancestors = HashSet; + /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { @@ -1128,15 +1131,18 @@ pub enum ProspectiveParachainsMessage { /// has been backed. This requires that the candidate was successfully introduced in /// the past. CandidateBacked(ParaId, CandidateHash), - /// Get N backable candidate hashes along with their relay parents for the given parachain, - /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// Try getting N backable candidate hashes along with their relay parents for the given + /// parachain, under the given relay-parent hash, which is a descendant of the given ancestors. + /// Timed out ancestors should not be included in the collection. /// N should represent the number of scheduled cores of this ParaId. - /// Returns `None` on the channel if no such candidate exists. + /// A timed out ancestor frees the cores of all of its descendants, so if there's a hole in the + /// supplied ancestor path, we'll get candidates that backfill those timed out slots first. It + /// may also return less/no candidates, if there aren't enough backable candidates recorded. GetBackableCandidates( Hash, ParaId, u32, - Vec, + Ancestors, oneshot::Sender>, ), /// Get the hypothetical frontier membership of candidates with the given properties diff --git a/prdoc/pr_3233.prdoc b/prdoc/pr_3233.prdoc new file mode 100644 index 00000000000..ed4e8cce31f --- /dev/null +++ b/prdoc/pr_3233.prdoc @@ -0,0 +1,12 @@ +title: "provisioner: allow multiple cores assigned to the same para" + +topic: Node + +doc: + - audience: Node Dev + description: | + Enable supplying multiple backable candidates to the paras_inherent pallet for the same paraid. + +crates: + - name: polkadot-node-core-prospective-parachains + - name: polkadot-node-core-provisioner -- GitLab From cdc8d197e6d487ef54f7e16767b5c1ab041c8b10 Mon Sep 17 00:00:00 2001 From: gupnik Date: Sat, 2 Mar 2024 18:19:12 +0530 Subject: [PATCH 33/86] Remove `as frame_system::DefaultConfig` from the required syntax in `derive_impl` (#3505) Step in https://github.com/paritytech/polkadot-sdk/issues/171 This PR removes the need to specify `as [disambiguation_path]` for cases where the trait definition resides within the same scope as default impl path. 
For example, in the following macro invocation ```rust #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { ... } ``` the trait `DefaultConfig` lies within the `frame_system` scope and `TestDefaultConfig` impls the `DefaultConfig` trait. Using this information, we can compute the disambiguation path internally, thus removing the need of an explicit specification. In cases where the trait lies outside this scope, we would still need to specify it explicitly, but this should take care of most (if not all) uses of `derive_impl` within FRAME's context. --- prdoc/pr_3505.prdoc | 36 ++++++++++ .../frame/examples/default-config/src/lib.rs | 2 +- .../support/procedural/src/derive_impl.rs | 68 +++++++++++++++---- substrate/frame/support/procedural/src/lib.rs | 5 ++ 4 files changed, 98 insertions(+), 13 deletions(-) create mode 100644 prdoc/pr_3505.prdoc diff --git a/prdoc/pr_3505.prdoc b/prdoc/pr_3505.prdoc new file mode 100644 index 00000000000..08ad909739c --- /dev/null +++ b/prdoc/pr_3505.prdoc @@ -0,0 +1,36 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removes `as [disambiguation_path]` from the required syntax in `derive_impl` + +doc: + - audience: Runtime Dev + description: | + This PR removes the need to specify `as [disambiguation_path]` for cases where the trait + definition resides within the same scope as default impl path. + + For example, in the following macro invocation + ```rust + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + ... + } + ``` + the trait `DefaultConfig` lies within the `frame_system` scope and `TestDefaultConfig` impls + the `DefaultConfig` trait. + + Using this information, we can compute the disambiguation path internally, thus removing the + need of an explicit specification: + ```rust + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + ... + } + ``` + + In cases where the trait lies outside this scope, we would still need to specify it explicitly, + but this should take care of most (if not all) uses of `derive_impl` within FRAME's context. + +crates: + - name: frame-support-procedural + - name: pallet-default-config-example diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index cd1653e6c76..69eca055965 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -149,7 +149,7 @@ pub mod tests { } ); - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { // these items are defined by frame-system as `no_default`, so we must specify them here. type Block = Block; diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index d6d5bf68efd..8740ccd401a 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -172,6 +172,33 @@ fn combine_impls( final_impl } +/// Computes the disambiguation path for the `derive_impl` attribute macro. 
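The inference that `compute_disambiguation_path` adds below can be pictured with a string-level sketch: take the first segment of the default impl path as the scope and prepend it to the trait named in the foreign `impl` block; a single-segment path carries no scope, so the trait path is used as-is. Plain `&str` paths stand in for `syn::Path` here; this is an illustration, not the macro's code:

```rust
/// Infer a disambiguation path from the default impl path and the trait
/// implemented by the foreign impl block (string-level approximation).
fn infer_disambiguation_path(default_impl_path: &str, foreign_trait_path: &str) -> String {
    let mut segments = default_impl_path.split("::");
    let scope = segments.next().unwrap_or_default();
    if segments.next().is_some() {
        // More than one segment: borrow the leading segment as the scope.
        format!("{scope}::{foreign_trait_path}")
    } else {
        // Single segment: nothing to borrow, keep the trait path as given.
        foreign_trait_path.to_string()
    }
}

fn main() {
    assert_eq!(
        infer_disambiguation_path(
            "frame_system::config_preludes::TestDefaultConfig",
            "DefaultConfig",
        ),
        "frame_system::DefaultConfig",
    );
    assert_eq!(infer_disambiguation_path("TestDefaultConfig", "DefaultConfig"), "DefaultConfig");
}
```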
+/// +/// When specified explicitly using `as [disambiguation_path]` in the macro attr, the +/// disambiguation is used as is. If not, we infer the disambiguation path from the +/// `foreign_impl_path` and the computed scope. +fn compute_disambiguation_path( + disambiguation_path: Option, + foreign_impl: ItemImpl, + default_impl_path: Path, +) -> Result { + match (disambiguation_path, foreign_impl.clone().trait_) { + (Some(disambiguation_path), _) => Ok(disambiguation_path), + (None, Some((_, foreign_impl_path, _))) => + if default_impl_path.segments.len() > 1 { + let scope = default_impl_path.segments.first(); + Ok(parse_quote!(#scope :: #foreign_impl_path)) + } else { + Ok(foreign_impl_path) + }, + _ => Err(syn::Error::new( + default_impl_path.span(), + "Impl statement must have a defined type being implemented \ + for a defined type such as `impl A for B`", + )), + } +} + /// Internal implementation behind [`#[derive_impl(..)]`](`macro@crate::derive_impl`). /// /// `default_impl_path`: the module path of the external `impl` statement whose tokens we are @@ -194,18 +221,11 @@ pub fn derive_impl( let foreign_impl = parse2::(foreign_tokens)?; let default_impl_path = parse2::(default_impl_path)?; - // have disambiguation_path default to the item being impl'd in the foreign impl if we - // don't specify an `as [disambiguation_path]` in the macro attr - let disambiguation_path = match (disambiguation_path, foreign_impl.clone().trait_) { - (Some(disambiguation_path), _) => disambiguation_path, - (None, Some((_, foreign_impl_path, _))) => foreign_impl_path, - _ => - return Err(syn::Error::new( - foreign_impl.span(), - "Impl statement must have a defined type being implemented \ - for a defined type such as `impl A for B`", - )), - }; + let disambiguation_path = compute_disambiguation_path( + disambiguation_path, + foreign_impl.clone(), + default_impl_path.clone(), + )?; // generate the combined impl let combined_impl = combine_impls( @@ -257,3 +277,27 @@ fn test_runtime_type_with_doc() { } } } + +#[test] +fn test_disambugation_path() { + let foreign_impl: ItemImpl = parse_quote!(impl SomeTrait for SomeType {}); + let default_impl_path: Path = parse_quote!(SomeScope::SomeType); + + // disambiguation path is specified + let disambiguation_path = compute_disambiguation_path( + Some(parse_quote!(SomeScope::SomePath)), + foreign_impl.clone(), + default_impl_path.clone(), + ); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomePath)); + + // disambiguation path is not specified and the default_impl_path has more than one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), default_impl_path.clone()); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeScope::SomeTrait)); + + // disambiguation path is not specified and the default_impl_path has only one segment + let disambiguation_path = + compute_disambiguation_path(None, foreign_impl.clone(), parse_quote!(SomeType)); + assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeTrait)); +} diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 5840377d722..ed920d6a1da 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -639,6 +639,11 @@ pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream /// default to `A` from the `impl A for B` part of the default impl. 
This is useful for scenarios /// where all of the relevant types are already in scope via `use` statements. /// +/// In case the `default_impl_path` is scoped to a different module such as +/// `some::path::TestTraitImpl`, the same scope is assumed for the `disambiguation_path`, i.e. +/// `some::A`. This enables the use of `derive_impl` attribute without having to specify the +/// `disambiguation_path` in most (if not all) uses within FRAME's context. +/// /// Conversely, the `default_impl_path` argument is required and cannot be omitted. /// /// Optionally, `no_aggregated_types` can be specified as follows: -- GitLab From b0741d4f78ebc424c7544e1d2d5db7968132e577 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Sun, 3 Mar 2024 19:37:41 +1100 Subject: [PATCH 34/86] Finish documenting `#[pallet::xxx]` macros (#2638) Closes https://github.com/paritytech/polkadot-sdk-docs/issues/35 - Moves pallet proc macro docs to `frame_support` - Adds missing docs - Revise revise existing docs, adding compiling doctests where appropriate --------- Co-authored-by: command-bot <> Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 1 + docs/sdk/src/guides/your_first_pallet/mod.rs | 20 +- substrate/frame/support/Cargo.toml | 2 + substrate/frame/support/procedural/src/lib.rs | 746 +----- substrate/frame/support/src/lib.rs | 2345 +++++++---------- 5 files changed, 1127 insertions(+), 1987 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c42ec612e6c..ad218001b40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5750,6 +5750,7 @@ dependencies = [ "sp-staking", "sp-state-machine", "sp-std 14.0.0", + "sp-timestamp", "sp-tracing 16.0.0", "sp-weights", "static_assertions", diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 20a8639b270..6a262924824 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -250,14 +250,16 @@ // of event is probably not the best. //! //! With the explanation out of the way, let's see how these components can be added. Both follow a -//! fairly familiar syntax: normal Rust enums, with an extra `#[frame::event/error]` attribute -//! attached. +//! fairly familiar syntax: normal Rust enums, with extra +//! [`#[frame::event]`](frame::pallet_macros::event) and +//! [`#[frame::error]`](frame::pallet_macros::error) attributes attached. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] //! -//! One slightly custom part of this is the `#[pallet::generate_deposit(pub(super) fn -//! deposit_event)]` part. Without going into too much detail, in order for a pallet to emit events -//! to the rest of the system, it needs to do two things: +//! One slightly custom part of this is the [`#[pallet::generate_deposit(pub(super) fn +//! deposit_event)]`](frame::pallet_macros::generate_deposit) part. Without going into too +//! much detail, in order for a pallet to emit events to the rest of the system, it needs to do two +//! things: //! //! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In //! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: @@ -266,7 +268,8 @@ //! store it where needed. //! //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME -//! 
provides a default way of storing events, and this is what `pallet::generate_deposit` is doing. +//! provides a default way of storing events, and this is what +//! [`pallet::generate_deposit`](frame::pallet_macros::generate_deposit) is doing. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in @@ -280,8 +283,9 @@ #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] //! //! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = -//! RuntimeEvent`) is generated by `construct_runtime`. An interesting way to inspect this type is -//! to see its definition in rust-docs: +//! RuntimeEvent`) is generated by +//! [`construct_runtime`](frame::runtime::prelude::construct_runtime). An interesting way to inspect +//! this type is to see its definition in rust-docs: //! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. //! //! diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index c19f478c803..fb9cb3d4511 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -66,6 +66,7 @@ aquamarine = { version = "0.5.0" } [dev-dependencies] assert_matches = "1.3.0" pretty_assertions = "1.2.1" +sp-timestamp = { path = "../../primitives/timestamp", default-features = false } frame-system = { path = "../system" } sp-crypto-hashing = { path = "../../primitives/crypto/hashing" } @@ -96,6 +97,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", + "sp-timestamp/std" ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index ed920d6a1da..036da52a7ba 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -188,133 +188,11 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. /// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. -/// -/// ## Macro expansion: -/// -/// The macro adds this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * `GetStorageVersion` -/// * `OnGenesis`: contains some logic to write the pallet version into storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. -/// -/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by -/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type -/// `frame_system::Config::PalletInfo`). -/// -/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. 
-/// -/// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the -/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the -/// `PartialStorageInfoTrait` implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The aim -/// of dev mode is to loosen some of the restrictions and requirements placed on production -/// pallets for easy tinkering and development. Dev mode pallets should not be used in -/// production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev -/// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is -/// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on -/// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type -/// definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, these -/// will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` can simply -/// be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument -/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` -/// attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
-/// -/// See `frame_support::pallet` docs for more info. -/// -/// ## Runtime Metadata Documentation -/// -/// The documentation added to this pallet is included in the runtime metadata. -/// -/// The documentation can be defined in the following ways: -/// -/// ```ignore -/// #[pallet::pallet] -/// /// Documentation for pallet 1 -/// #[doc = "Documentation for pallet 2"] -/// #[doc = include_str!("../README.md")] -/// #[pallet_doc("../doc1.md")] -/// #[pallet_doc("../doc2.md")] -/// pub mod pallet {} -/// ``` -/// -/// The runtime metadata for this pallet contains the following -/// - " Documentation for pallet 1" (captured from `///`) -/// - "Documentation for pallet 2" (captured from `#[doc]`) -/// - content of ../README.md (captured from `#[doc]` with `include_str!`) -/// - content of "../doc1.md" (captured from `pallet_doc`) -/// - content of "../doc2.md" (captured from `pallet_doc`) -/// -/// ### `doc` attribute -/// -/// The value of the `doc` attribute is included in the runtime metadata, as well as -/// expanded on the pallet module. The previous example is expanded to: -/// -/// ```ignore -/// /// Documentation for pallet 1 -/// /// Documentation for pallet 2 -/// /// Content of README.md -/// pub mod pallet {} -/// ``` -/// -/// If you want to specify the file from which the documentation is loaded, you can use the -/// `include_str` macro. However, if you only want the documentation to be included in the -/// runtime metadata, use the `pallet_doc` attribute. -/// -/// ### `pallet_doc` attribute -/// -/// Unlike the `doc` attribute, the documentation provided to the `pallet_doc` attribute is -/// not inserted on the module. -/// -/// The `pallet_doc` attribute can only be provided with one argument, -/// which is the file path that holds the documentation to be added to the metadata. +/// --- /// -/// This approach is beneficial when you use the `include_str` macro at the beginning of the file -/// and want that documentation to extend to the runtime metadata, without reiterating the -/// documentation on the pallet module itself. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet`. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -408,19 +286,28 @@ pub fn transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::require_transactional`. #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::require_transactional(attr, input) .unwrap_or_else(|e| e.to_compile_error().into()) } -/// Derive [`Clone`] but do not bound any generic. Docs are at `frame_support::CloneNoBound`. +/// Derive [`Clone`] but do not bound any generic. +/// +/// Docs at `frame_support::CloneNoBound`. #[proc_macro_derive(CloneNoBound)] pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { no_bound::clone::derive_clone_no_bound(input) } -/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. +/// Derive [`Debug`] but do not bound any generics. +/// +/// Docs at `frame_support::DebugNoBound`. 
#[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { no_bound::debug::derive_debug_no_bound(input) @@ -452,14 +339,17 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } } -/// Derive [`PartialEq`] but do not bound any generic. Docs are at -/// `frame_support::PartialEqNoBound`. +/// Derive [`PartialEq`] but do not bound any generic. +/// +/// Docs at `frame_support::PartialEqNoBound`. #[proc_macro_derive(PartialEqNoBound)] pub fn derive_partial_eq_no_bound(input: TokenStream) -> TokenStream { no_bound::partial_eq::derive_partial_eq_no_bound(input) } -/// Derive [`Eq`] but do no bound any generic. Docs are at `frame_support::EqNoBound`. +/// DeriveEq but do no bound any generic. +/// +/// Docs at `frame_support::EqNoBound`. #[proc_macro_derive(EqNoBound)] pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::DeriveInput); @@ -494,6 +384,7 @@ pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { no_bound::default::derive_default_no_bound(input) } +/// Macro used internally in FRAME to generate the crate version for a pallet. #[proc_macro] pub fn crate_to_crate_version(input: TokenStream) -> TokenStream { crate_version::crate_to_crate_version(input) @@ -555,42 +446,11 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { tt_macro::create_tt_return_macro(input) } -/// Allows accessing on-chain pallet storage that is no longer accessible via the pallet. -/// -/// This is especially useful when writing storage migrations, when types of storage items are -/// modified or outright removed, but the previous definition is required to perform the migration. /// -/// ## Example -/// -/// Imagine a pallet with the following storage definition: -/// ```ignore -/// #[pallet::storage] -/// pub type Value = StorageValue<_, u32>; -/// ``` -/// `Value` can be accessed by calling `Value::::get()`. -/// -/// Now imagine the definition of `Value` is updated to a `(u32, u32)`: -/// ```ignore -/// #[pallet::storage] -/// pub type Value = StorageValue<_, (u32, u32)>; -/// ``` -/// The on-chain value of `Value` is `u32`, but `Value::::get()` expects it to be `(u32, u32)`. -/// -/// In this instance the developer must write a storage migration to reading the old value of -/// `Value` and writing it back to storage in the new format, so that the on-chain storage layout is -/// consistent with what is defined in the pallet. -/// -/// We can read the old v0 value of `Value` in the migration by creating a `storage_alias`: -/// ```ignore -/// pub(crate) mod v0 { -/// use super::*; -/// -/// #[storage_alias] -/// pub type Value = StorageValue, u32>; -/// } -/// ``` +/// --- /// -/// The developer can now access the old value of `Value` by calling `v0::Value::::get()`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_alias`. #[proc_macro_attribute] pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { storage_alias::storage_alias(attributes.into(), input.into()) @@ -826,24 +686,21 @@ pub fn derive_impl(attrs: TokenStream, input: TokenStream) -> TokenStream { .into() } -/// The optional attribute `#[pallet::no_default]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. 
/// -/// Attaching this attribute to a trait item ensures that that trait item will not be used as a -/// default with the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default`. #[proc_macro_attribute] pub fn no_default(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::no_default_bounds]` can be attached to trait items within a -/// `Config` trait impl that has [`#[pallet::config(with_default)]`](`macro@config`) attached. /// -/// Attaching this attribute to a trait item ensures that the generated trait `DefaultConfig` -/// will not have any bounds for this trait item. +/// --- /// -/// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` trait, -/// the generated `DefaultConfig` will only have `type AccountId;` with no trait bound. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::no_default_bounds`. #[proc_macro_attribute] pub fn no_default_bounds(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -923,6 +780,11 @@ pub fn register_default_impl(attrs: TokenStream, tokens: TokenStream) -> TokenSt } } +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_prelude::inject_runtime_type`. #[proc_macro_attribute] pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { let item = tokens.clone(); @@ -956,75 +818,11 @@ fn pallet_macro_stub() -> TokenStream { .into() } -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. /// -/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item -/// in your `#[pallet::config]`. -/// -/// ## Optional: `with_default` -/// -/// An optional `with_default` argument may also be specified. Doing so will automatically -/// generate a `DefaultConfig` trait inside your pallet which is suitable for use with -/// [`[#[derive_impl(..)]`](`macro@derive_impl`) to derive a default testing config: -/// -/// ```ignore -/// #[pallet::config(with_default)] -/// pub trait Config: frame_system::Config { -/// type RuntimeEvent: Parameter -/// + Member -/// + From> -/// + Debug -/// + IsType<::RuntimeEvent>; -/// -/// #[pallet::no_default] -/// type BaseCallFilter: Contains; -/// // ... -/// } -/// ``` -/// -/// As shown above, you may also attach the [`#[pallet::no_default]`](`macro@no_default`) -/// attribute to specify that a particular trait item _cannot_ be used as a default when a test -/// `Config` is derived using the [`#[derive_impl(..)]`](`macro@derive_impl`) attribute macro. 
-/// This will cause that particular trait item to simply not appear in default testing configs -/// based on this config (the trait item will not be included in `DefaultConfig`). -/// -/// ### `DefaultConfig` Caveats -/// -/// The auto-generated `DefaultConfig` trait: -/// - is always a _subset_ of your pallet's `Config` trait. -/// - can only contain items that don't rely on externalities, such as `frame_system::Config`. -/// -/// Trait items that _do_ rely on externalities should be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) -/// -/// Consequently: -/// - Any items that rely on externalities _must_ be marked with -/// [`#[pallet::no_default]`](`macro@no_default`) or your trait will fail to compile when used -/// with [`derive_impl`](`macro@derive_impl`). -/// - Items marked with [`#[pallet::no_default]`](`macro@no_default`) are entirely excluded from the -/// `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to implement -/// such items. +/// --- /// -/// For more information, see [`macro@derive_impl`]. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::config`. #[proc_macro_attribute] pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1033,7 +831,7 @@ pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant`. #[proc_macro_attribute] pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1043,106 +841,58 @@ pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::constant_name`. #[proc_macro_attribute] pub fn constant_name(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: /// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` +/// --- /// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_frame_system_supertrait_check`. #[proc_macro_attribute] pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. /// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. +/// --- /// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. 
+/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::generate_store`. #[proc_macro_attribute] pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage -/// version needs to be communicated to the macro. This can be done by using the -/// `pallet::storage_version` attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); /// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` +/// --- /// -/// If not present, the in-code storage version is set to the default value. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_version`. #[proc_macro_attribute] pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for -/// `Pallet` that specifies pallet-specific logic. /// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// ## Macro expansion -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. -/// -/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some -/// additional logic. E.g. logic to write the pallet version into storage. +/// --- /// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::hooks`. #[proc_macro_attribute] pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::weight`. #[proc_macro_attribute] pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1151,8 +901,8 @@ pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::compact`. 
#[proc_macro_attribute] pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1161,8 +911,8 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call`. #[proc_macro_attribute] pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1173,164 +923,60 @@ pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in -/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::call_index`. #[proc_macro_attribute] pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, -/// which explicitly defines the condition for the dispatchable to be feeless. -/// -/// The arguments for the closure must be the referenced arguments of the dispatchable function. -/// -/// The closure must return `bool`. -/// -/// ### Example -/// ```ignore -/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { -/// *something == 0 -/// })] -/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { -/// .... -/// } -/// ``` /// -/// Please note that this only works for signed dispatchables and requires a signed extension -/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing -/// payment extension. Else, this is completely ignored and the dispatchable is still charged. +/// --- /// -/// ### Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding -/// closure in the implementation. +/// `frame_support::pallet_macros::feeless_if`. #[proc_macro_attribute] pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Allows you to define some extra constants to be added into constant metadata. /// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. +/// --- /// -/// ## Macro expansion +/// Rust-Analyzer Users: Documentation for this macro can be found at /// -/// The macro add some extra constants to pallet constant metadata. +/// `frame_support::pallet_macros::extra_constants`. #[proc_macro_attribute] pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. 
-/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. /// -/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used -/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in -/// size in order to reduce storage size. The error enum itself has an absolute maximum encoded -/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will -/// fail to compile. Rust primitive types have already implemented the `PalletError` trait -/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence -/// in most use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// ## Macro expansion -/// -/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and -/// `as_str` using variant doc. +/// --- /// -/// The macro also implements `From>` for `&'static str` and `From>` for -/// `DispatchError`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::error`. #[proc_macro_attribute] pub fn error(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are -/// stored under the `system` / `events` key when the block is applied (and then replaced when -/// the next block writes it's events). /// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. +/// --- /// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and -/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in -/// `frame_support::pallet_prelude`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::event`. #[proc_macro_attribute] pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. /// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. 
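For the `error`, `event`, and `generate_deposit` attributes whose long-form docs are removed above, a compact sketch of how they fit together inside a pallet module (variant names are illustrative, and the pallet's `Config` is assumed to declare the usual `RuntimeEvent` associated type):

```rust
#[pallet::error]
pub enum Error<T> {
	/// The requested item does not exist.
	NotFound,
	/// The supplied value exceeds the allowed maximum.
	TooLarge,
}

#[pallet::event]
// Generates `Pallet::<T>::deposit_event(..)`, which deposits events via `frame_system`.
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
	/// A value was stored by `who`.
	ValueStored { value: u32, who: T::AccountId },
}
```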
-/// -/// ## Macro expansion -/// -/// The macro will add on enum `Event` the attributes: -/// * `#[derive(frame_support::CloneNoBound)]` -/// * `#[derive(frame_support::EqNoBound)]` -/// * `#[derive(frame_support::PartialEqNoBound)]` -/// * `#[derive(frame_support::RuntimeDebugNoBound)]` -/// * `#[derive(codec::Encode)]` -/// * `#[derive(codec::Decode)]` -/// -/// The macro implements `From>` for (). -/// -/// The macro implements a metadata function on `Event` returning the `EventMetadata`. +/// --- /// -/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on -/// `Pallet`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::generate_deposit`. #[proc_macro_attribute] pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1339,124 +985,68 @@ pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::storage`. #[proc_macro_attribute] pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. /// -/// Also see [`pallet::storage`](`macro@storage`) +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::getter`. #[proc_macro_attribute] pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use. This is helpful if you wish to rename the storage field but don't -/// want to perform a migration. -/// -/// E.g: /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or +/// --- /// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::storage_prefix`. #[proc_macro_attribute] pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). +/// +/// --- +/// +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::unbounded`. #[proc_macro_attribute] pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::whitelist_storage]` will declare the -/// storage as whitelisted from benchmarking. Doing so will exclude reads of -/// that value's storage key from counting towards weight calculations during -/// benchmarking. -/// -/// This attribute should only be attached to storages that are known to be -/// read/used in every block. 
This will result in a more accurate benchmarking weight. /// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, frame_system::pallet_prelude::BlockNumberFor::, ValueQuery>; -/// ``` +/// --- /// -/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as -/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for -/// it to work properly. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::whitelist_storage`. #[proc_macro_attribute] pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the -/// storage as whitelisted from decoding during try-runtime checks. This should only be -/// attached to transient storage which cannot be migrated during runtime upgrades. /// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::disable_try_decode_storage] -/// pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; -/// ``` +/// --- /// -/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as -/// `#[pallet::disable_try_decode_storage]` and can only be placed inside a `pallet` module in order -/// for it to work properly. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::disable_try_decode_storage`. #[proc_macro_attribute] pub fn disable_try_decode_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait -/// to ease the use of storage types. This attribute is meant to be used alongside -/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute -/// can be used multiple times. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` /// -/// ## Macro expansion +/// --- /// -/// The macro renames the function to some internal name, generates a struct with the original -/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user -/// defined function. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::type_value`. #[proc_macro_attribute] pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1465,7 +1055,7 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { @@ -1475,66 +1065,28 @@ pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::genesis_build`. 
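As a reference for the storage-level attributes and the `type_value` default discussed above, a sketch inside a pallet module; the storage names and default value are hypothetical:

```rust
/// Default returned by `Counter` below when no value is stored yet.
#[pallet::type_value]
pub fn DefaultCounter<T: Config>() -> u32 {
	1
}

#[pallet::storage]
pub type Counter<T> = StorageValue<_, u32, ValueQuery, DefaultCounter<T>>;

// Read in every block, so excluded from benchmark weight tracking.
#[pallet::storage]
#[pallet::whitelist_storage]
pub type DidUpdate<T> = StorageValue<_, bool, ValueQuery>;

// Transient flag rebuilt every block; skipped by try-runtime state decoding.
#[pallet::storage]
#[pallet::disable_try_decode_storage]
pub type InherentIncluded<T> = StorageValue<_, bool, ValueQuery>;
```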
#[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. /// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type -/// `Pallet`, and some optional where clause. -/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::inherent`. #[proc_macro_attribute] pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type -/// `Pallet`, and some optional where clause. /// -/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add -/// some specific logic for transaction validation. -/// -/// ## Macro expansion +/// --- /// -/// The macro currently makes no use of this information, but it might use this information in -/// the future to give information directly to `construct_runtime`. +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::validate_unsigned`. #[proc_macro_attribute] pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1543,36 +1095,18 @@ pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { /// /// --- /// -/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// Rust-Analyzer Users: Documentation for this macro can be found at /// `frame_support::pallet_macros::origin`. #[proc_macro_attribute] pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets composed as an -/// aggregate enum by `construct_runtime`. This is similar in principle with `#[pallet::event]` and -/// `#[pallet::error]`. /// -/// The attribute currently only supports enum definitions, and identifiers that are named -/// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the enum are -/// not supported. 
The aggregate enum generated by `construct_runtime` will have the name of -/// `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` -/// respectively. -/// -/// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion function from -/// the pallet enum to the aggregate enum, and automatically derives the following traits: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, -/// RuntimeDebug -/// ``` +/// --- /// -/// For ease of usage, when no `#[derive]` attributes are found for the enum under -/// `#[pallet::composite_enum]`, the aforementioned traits are automatically derived for it. The -/// inverse is also true: if there are any `#[derive]` attributes found for the enum, then no traits -/// will automatically be derived for it (this implies that you need to provide the -/// `frame_support::traits::VariantCount` implementation). +/// Rust-Analyzer Users: Documentation for this macro can be found at +/// `frame_support::pallet_macros::composite_enum`. #[proc_macro_attribute] pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1628,25 +1162,11 @@ pub fn task_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Can be attached to a module. Doing so will declare that module as importable into a pallet -/// via [`#[import_section]`](`macro@import_section`). -/// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. Do not attempt to make use -/// of `use` statements to bring pallet sections into scope, as this will not work (unless -/// you do so as part of a wildcard import, in which case it will work). -/// -/// ## Naming Logistics /// -/// Also note that because of how `#[pallet_section]` works, pallet section names must be -/// globally unique _within the crate in which they are defined_. For more information on -/// why this must be the case, see macro_magic's -/// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. +/// --- /// -/// Optionally, you may provide an argument to `#[pallet_section]` such as -/// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in -/// same crate with the same ident/name. The ident you specify can then be used instead of -/// the module's ident name when you go to import it via `#[import_section]`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::pallet_section`. #[proc_macro_attribute] pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { let tokens_clone = tokens.clone(); @@ -1660,39 +1180,11 @@ pub fn pallet_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { } } -/// An attribute macro that can be attached to a module declaration. Doing so will -/// Imports the contents of the specified external pallet section that was defined -/// previously using [`#[pallet_section]`](`macro@pallet_section`). /// -/// ## Example -/// ```ignore -/// #[import_section(some_section)] -/// #[pallet] -/// pub mod pallet { -/// // ... -/// } -/// ``` -/// where `some_section` was defined elsewhere via: -/// ```ignore -/// #[pallet_section] -/// pub mod some_section { -/// // ... -/// } -/// ``` -/// -/// This will result in the contents of `some_section` being _verbatim_ imported into -/// the pallet above. 
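The `#[pallet_section]` / `#[import_section]` pair described above can be sketched as follows. The module and storage names are invented; note that the section's tokens only resolve once they are imported into a pallet that has the relevant types (here the pallet prelude) in scope:

```rust
/// A section defined once, outside of any pallet.
#[frame_support::pallet_section]
mod shared_storage {
	// Resolved in the importing pallet, which has `frame_support::pallet_prelude` in scope.
	#[pallet::storage]
	pub type SharedCounter<T> = StorageValue<_, u32, ValueQuery>;
}

/// A pallet that pulls the section's tokens in verbatim.
#[frame_support::import_section(shared_storage)]
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}
}
```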
Note that since the tokens for `some_section` are essentially -/// copy-pasted into the target pallet, you cannot refer to imports that don't also -/// exist in the target pallet, but this is easily resolved by including all relevant -/// `use` statements within your pallet section, so they are imported as well, or by -/// otherwise ensuring that you have the same imports on the target pallet. -/// -/// It is perfectly permissible to import multiple pallet sections into the same pallet, -/// which can be done by having multiple `#[import_section(something)]` attributes -/// attached to the pallet. +/// --- /// -/// Note that sections are imported by their module name/ident, and should be referred to by -/// their _full path_ from the perspective of the target pallet. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::import_section`. #[import_tokens_attr { format!( "{}::macro_magic", diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index d2d9a33053f..caf9457d666 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -900,18 +900,20 @@ pub mod pallet_prelude { }; pub use codec::{Decode, Encode, MaxEncodedLen}; pub use frame_support::pallet_macros::*; + /// The optional attribute `#[inject_runtime_type]` can be attached to `RuntimeCall`, /// `RuntimeEvent`, `RuntimeOrigin` or `PalletInfo` in an impl statement that has /// `#[register_default_impl]` attached to indicate that this item is generated by /// `construct_runtime`. /// /// Attaching this attribute to such an item ensures that the combined impl generated via - /// [`#[derive_impl(..)]`](`macro@super::derive_impl`) will use the correct type - /// auto-generated by `construct_runtime!`. + /// [`#[derive_impl(..)]`](`frame_support::derive_impl`) will use the correct + /// type auto-generated by + /// `construct_runtime!`. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_runtime_type_injection)] /// /// However, if `no_aggregated_types` is specified while using - /// `[`#[derive_impl(..)]`](`macro@super::derive_impl`)`, then these items are attached + /// `[`#[derive_impl(..)]`](`frame_support::derive_impl`)`, then these items are attached /// verbatim to the combined impl. #[doc = docify::embed!("src/tests/inject_runtime_type.rs", derive_impl_works_with_no_aggregated_types)] pub use frame_support_procedural::inject_runtime_type; @@ -931,130 +933,26 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } -/// The `pallet` attribute macro defines a pallet that can be used with -/// [`construct_runtime!`]. It must be attached to a module named `pallet` as follows: +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to +/// specify pallet information. /// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// ... -/// } +/// The struct must be defined as follows: /// ``` +/// #[frame_support::pallet] +/// mod pallet { +/// #[pallet::pallet] // <- the macro +/// pub struct Pallet(_); // <- the struct definition /// -/// Note that various types can be automatically imported using -/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: -/// -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... 
+/// #[pallet::config] +/// pub trait Config: frame_system::Config {} /// } /// ``` /// -/// # pallet::* Attributes -/// -/// The `pallet` macro will parse any items within your `pallet` module that are annotated with -/// `#[pallet::*]` attributes. Some of these attributes are mandatory and some are optional, -/// and they can attach to different types of items within your pallet depending on the -/// attribute in question. The full list of `#[pallet::*]` attributes is shown below in the -/// order in which they are mentioned in this document: -/// -/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) -/// * [`pallet::config`](#config-trait-palletconfig-mandatory) -/// * [`pallet::constant`](#palletconstant) -/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) -/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) -/// * [`pallet::storage_version`](#palletstorage_version) -/// * [`pallet::hooks`](#hooks-pallethooks-optional) -/// * [`pallet::call`](#call-palletcall-optional) -/// * [`pallet::weight($expr)`](#palletweightexpr) -/// * [`pallet::compact`](#palletcompact-some_arg-some_type) -/// * [`pallet::call_index($idx)`](#palletcall_indexidx) -/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) -/// * [`pallet::error`](#error-palleterror-optional) -/// * [`pallet::event`](#event-palletevent-optional) -/// * [`pallet::generate_deposit($visibility fn -/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) -/// * [`pallet::storage`](#storage-palletstorage-optional) -/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) -/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) -/// * [`pallet::unbounded`](#palletunbounded-optional) -/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) -/// * [`pallet::disable_try_decode_storage`](#palletdisable_try_decode_storage-optional) -/// * [`cfg(..)`](#cfg-for-storage) (on storage items) -/// * [`pallet::type_value`](#type-value-pallettype_value-optional) -/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) -/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) -/// * [`pallet::inherent`](#inherent-palletinherent-optional) -/// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) -/// * [`pallet::origin`](#origin-palletorigin-optional) -/// * [`pallet::composite_enum`](#composite-enum-palletcomposite_enum-optional) -/// -/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these -/// attributes, ultimately removing their AST nodes before they can be parsed as real -/// attribute macro calls. This means that technically we do not need attribute macro -/// definitions for any of these attributes, however, for consistency and discoverability -/// reasons, we still maintain stub attribute macro definitions for all of these attributes in -/// the [`pallet_macros`] module which is automatically included in all pallets as part of the -/// pallet prelude. The actual "work" for all of these attribute macros can be found in the -/// macro expansion for `#[pallet]`. -/// -/// Also note that in this document, pallet attributes are explained using the syntax of -/// non-instantiable pallets. For an example of an instantiable pallet, see [this -/// example](#example-of-an-instantiable-pallet). 
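As a one-screen orientation for the attribute list above, a minimal, hypothetical pallet that uses the mandatory attributes plus a few common optional ones (names, bounds, and the weight value are illustrative):

```rust
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
	}

	#[pallet::storage]
	pub type Value<T> = StorageValue<_, u32, ValueQuery>;

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		ValueSet { value: u32 },
	}

	#[pallet::error]
	pub enum Error<T> {
		ValueTooLarge,
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		#[pallet::call_index(0)]
		#[pallet::weight(Weight::from_parts(10_000, 0))]
		pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult {
			ensure_signed(origin)?;
			ensure!(value <= 1_000, Error::<T>::ValueTooLarge);
			Value::<T>::put(value);
			Self::deposit_event(Event::ValueSet { value });
			Ok(())
		}
	}
}
```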
-///
-/// # Dev Mode (`#[pallet(dev_mode)]`)
-///
-/// Specifying the argument `dev_mode` on the `#[pallet]` or `#[frame_support::pallet]`
-/// attribute attached to your pallet module will allow you to enable dev mode for a pallet.
-/// The aim of dev mode is to loosen some of the restrictions and requirements placed on
-/// production pallets for easy tinkering and development. Dev mode pallets should not be used
-/// in production. Enabling dev mode has the following effects:
-///
-/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By
-///   default, dev mode pallets will assume a weight of zero (`0`) if a weight is not
-///   specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not
-///   specify a weight.
-/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By
-///   default, dev mode pallets will assume a call index based on the order of the call.
-/// * All storages are marked as unbounded, meaning you do not need to implement
-///   `MaxEncodedLen` on storage types. This is equivalent to specifying `#[pallet::unbounded]`
-///   on all storage type definitions.
-/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode,
-///   these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher`
-///   can simply be ignored when in `dev_mode`.
-///
-/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or
-/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument
-/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]`
-/// attribute macro.
-///
-///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
-/// and therefore should never be done. Once you are done tinkering, you should remove the
-/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
-/// attempting to use your pallet in a production scenario.
-/// 
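To make the dev-mode description above concrete, a sketch of a pallet compiled with `dev_mode` (suitable for tests and tinkering only, never production; the storage and call names are invented):

```rust
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// All storage is implicitly unbounded in dev mode, and the `_` hasher
	// placeholder is filled in with `Blake2_128Concat`.
	#[pallet::storage]
	pub type Notes<T: Config> = StorageMap<_, _, T::AccountId, u32>;

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// No `#[pallet::weight]` or `#[pallet::call_index]` is needed in dev mode.
		pub fn note(origin: OriginFor<T>, value: u32) -> DispatchResult {
			let who = ensure_signed(origin)?;
			Notes::<T>::insert(who, value);
			Ok(())
		}
	}
}
```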
-/// -/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) -/// -/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify -/// pallet information. -/// -/// The struct must be defined as follows: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` /// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. /// /// ## Macro expansion: /// -/// The macro adds this attribute to the struct definition: +/// The macro adds this attribute to the Pallet struct definition: /// ```ignore /// #[derive( /// frame_support::CloneNoBound, @@ -1063,1233 +961,947 @@ pub mod pallet_prelude { /// frame_support::RuntimeDebugNoBound, /// )] /// ``` -/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: -/// * [`GetStorageVersion`](`traits::GetStorageVersion`) -/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into -/// storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// and replaces the type `_` with `PhantomData`. /// -/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess') on `Pallet` to ease access -/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation -/// uses the associated type `frame_system::Config::PalletInfo`). +/// It also implements on the pallet: /// -/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which give -/// information about all storages. -/// -/// If the attribute `generate_store` is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. +/// * [`GetStorageVersion`](frame_support::traits::GetStorageVersion) +/// * [`OnGenesis`](frame_support::traits::OnGenesis): contains some logic to write the pallet +/// version into storage. +/// * [`PalletInfoAccess`](frame_support::traits::PalletInfoAccess) to ease access to pallet +/// information given by [`frame_support::traits::PalletInfo`]. (The implementation uses the +/// associated type [`frame_support::traits::PalletInfo`]). +/// * [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) to give information about +/// storages. /// /// If the attribute `set_storage_max_encoded_len` is set then the macro calls -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. Otherwise it implements -/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the -/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages. -/// -/// # Config trait: `#[pallet::config]` (mandatory) -/// -/// The mandatory attribute `#[pallet::config]` defines the configurable options for the -/// pallet. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits -/// $optional_where_clause -/// { -/// ... -/// } -/// ``` -/// -/// I.e. a regular trait definition named `Config`, with the supertrait -/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. -/// (Specifying other supertraits here is known as [tight -/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) -/// -/// The associated type `RuntimeEvent` is reserved. 
If defined, it must have the bounds -/// `From` and `IsType<::RuntimeEvent>`. -/// -/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent` -/// exists as a config item in your `#[pallet::config]`. -/// -/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) -/// -/// ## `pallet::constant` -/// -/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by -/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for each storage in the +/// implementation of [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the +/// pallet. Otherwise it implements +/// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the pallet using the +/// [`PartialStorageInfoTrait`](frame_support::traits::PartialStorageInfoTrait) +/// implementation of storages. /// -/// ```ignore -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; -/// } -/// ``` -/// -/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) -/// -/// ## `pallet::disable_frame_system_supertrait_check` -/// -/// -/// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `pallet::disable_frame_system_supertrait_check`, e.g.: -/// -/// ```ignore -/// #[pallet::config] -/// #[pallet::disable_frame_system_supertrait_check] -/// pub trait Config: pallet_timestamp::Config {} -/// ``` -/// -/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you -/// want to write an alternative to the `frame_system` pallet. -/// -/// Also see -/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) -/// -/// ## Macro expansion: -/// -/// The macro expands pallet constant metadata with the information given by -/// `#[pallet::constant]`. -/// -/// # `pallet::generate_store($vis trait Store)` -/// -/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with -/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: -/// -/// ```ignore -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// ``` -/// More precisely, the `Store` trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing access to the storage from pallet struct. -/// -/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using -/// `::Foo`. -/// -/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct -/// definition. -/// -/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). -/// -/// # `pallet::storage_version` -/// -/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the in-code storage version needs to be -/// communicated to the macro. This can be done by using the `pallet::storage_version` -/// attribute: -/// -/// ```ignore -/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// -/// #[pallet::pallet] -/// #[pallet::storage_version(STORAGE_VERSION)] -/// pub struct Pallet(_); -/// ``` -/// -/// If not present, the in-code storage version is set to the default value. 
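Looking back at the `#[pallet::config]` material above, a compact sketch of a config trait that uses `#[pallet::constant]` (the associated type names are illustrative, and the `RuntimeEvent` bound is only needed because the pallet is assumed to declare `#[pallet::event]`):

```rust
#[pallet::config]
pub trait Config: frame_system::Config {
	/// The overarching event type.
	type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;

	/// Exposed in the pallet's constant metadata because of `#[pallet::constant]`.
	#[pallet::constant]
	type MaxItems: Get<u32>;
}
```

A pallet that intentionally does not take `frame_system::Config` as a supertrait would instead place `#[pallet::disable_frame_system_supertrait_check]` directly on its `Config` trait, as the removed docs above describe.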
-/// -/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) -/// -/// # Hooks: `#[pallet::hooks]` (optional) -/// -/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` -/// that specifies pallet-specific logic. -/// -/// The item the attribute attaches to must be defined as follows: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet $optional_where_clause { -/// ... -/// } -/// ``` -/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` and -/// with an optional where clause. -/// -/// If no `#[pallet::hooks]` exists, then the following default implementation is -/// automatically generated: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks> for Pallet {} -/// ``` -/// -/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) -/// -/// # Call: `#[pallet::call]` (optional) -/// -/// Implementation of pallet dispatchables. -/// -/// Item must be defined as: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// /// $some_doc -/// #[pallet::weight($ExpressionResultingInWeight)] -/// pub fn $fn_name( -/// origin: OriginFor, -/// $some_arg: $some_type, -/// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, -/// ... -/// ) -> DispatchResultWithPostInfo { // or `-> DispatchResult` -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// an optional where clause. +/// ## Dev Mode (`#[pallet(dev_mode)]`) /// -/// ## `#[pallet::weight($expr)]` +/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The +/// aim of dev mode is to loosen some of the restrictions and requirements placed on +/// production pallets for easy tinkering and development. Dev mode pallets should not be +/// used in production. Enabling dev mode has the following effects: /// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the -/// first argument must be `origin: OriginFor`. -/// -/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) -/// -/// ### `#[pallet::compact] $some_arg: $some_type` -/// -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. -/// -/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) -/// -/// ## `#[pallet::call_index($idx)]` -/// -/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. -/// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). 
Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. -/// -/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`) -/// -/// # Extra constants: `#[pallet::extra_constants]` (optional) -/// -/// Allows you to define some extra constants to be added into constant metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::extra_constants] -/// impl Pallet where $optional_where_clause { -/// /// $some_doc -/// $vis fn $fn_name() -> $some_return_type { -/// ... -/// } -/// ... -/// } -/// ``` -/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, -/// 0 generics, and some return type. -/// -/// ## Macro expansion -/// -/// The macro add some extra constants to pallet constant metadata. -/// -/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`) -/// -/// # Error: `#[pallet::error]` (optional) -/// -/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned -/// from the dispatchable when an error occurs. The information for this error type is then -/// stored in metadata. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::error] -/// pub enum Error { -/// /// $some_optional_doc -/// $SomeFieldLessVariant, -/// /// $some_more_optional_doc -/// $SomeVariantWithOneField(FieldType), -/// ... -/// } -/// ``` -/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field -/// variants. -/// -/// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be -/// properly used in the metadata, and its encoded size should be as small as possible, -/// preferably 1 byte in size in order to reduce storage size. The error enum itself has an -/// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`]. -/// -/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to -/// diagnose problems and give a better experience to the user. Don't skimp on having lots of -/// individual error conditions.) -/// -/// Field types in enum variants must also implement [`PalletError`](traits::PalletError), -/// otherwise the pallet will fail to compile. Rust primitive types have already implemented -/// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types -/// such as [`Option`] and -/// [`PhantomData`](`frame_support::__private::sp_std::marker::PhantomData`), and hence in most -/// use cases, a manual implementation is not necessary and is discouraged. -/// -/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, -/// bounds and/or a where clause should not needed for any use-case. -/// -/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`) -/// -/// # Event: `#[pallet::event]` (optional) -/// -/// Allows you to define pallet events. Pallet events are stored under the `system` / `events` -/// key when the block is applied (and then replaced when the next block writes it's events). 
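For the `#[pallet::extra_constants]` shape described above, a brief sketch (the function name and value are invented):

```rust
#[pallet::extra_constants]
impl<T: Config> Pallet<T> {
	/// Appears in the pallet's constant metadata alongside `#[pallet::constant]` items,
	/// but is computed by a function rather than read from the `Config` trait.
	fn supported_protocol_version() -> u32 {
		3
	}
}
```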
-/// -/// The Event enum must be defined as follows: -/// -/// ```ignore -/// #[pallet::event] -/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional -/// pub enum Event<$some_generic> $optional_where_clause { -/// /// Some doc -/// $SomeName($SomeType, $YetanotherType, ...), -/// ... -/// } -/// ``` -/// -/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or -/// `T` or `T: Config`, and optional w here clause. -/// -/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and -/// [`Debug`] (on std only). For ease of use, bound by the trait -/// [`Member`](`frame_support::pallet_prelude::Member`), available in -/// frame_support::pallet_prelude. -/// -/// Also see [`pallet::event`](`frame_support::pallet_macros::event`) -/// -/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]` -/// -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a -/// helper function on `Pallet` that handles deposit events. -/// -/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. -/// -/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`) -/// -/// # Storage: `#[pallet::storage]` (optional) -/// -/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime -/// storage and also set its metadata. This attribute can be used multiple times. -/// -/// Item should be defined as: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; -/// ``` -/// -/// or with unnamed generic: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn $getter_name)] // optional -/// $vis type $StorageName<$some_generic> $optional_where_clause -/// = $StorageType<_, $some_generics, ...>; -/// ``` -/// -/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be -/// one of [`StorageValue`](`pallet_prelude::StorageValue`), -/// [`StorageMap`](`pallet_prelude::StorageMap`) or -/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the -/// storage type can be given in two manners: named and unnamed. For named generic arguments, -/// the name for each argument should match the name defined for it on the storage struct: -/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally -/// `QueryKind` and `OnEmpty`, -/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and -/// optionally `QueryKind` and `OnEmpty`, -/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`, -/// `Value` and optionally `QueryKind` and `OnEmpty`, -/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`, -/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal generic type declaration. -/// -/// The `Prefix` generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names -/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. 
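To illustrate the two ways of passing storage generics described above, along with the resulting prefixes, a sketch (the type names are hypothetical and the prefix comments assume the pallet is named "MyExample" in the runtime, as in the quoted rule):

```rust
// Unnamed generics: the leading `_` is replaced by the macro-generated prefix type.
// Entries live under `Twox128(b"MyExample") ++ Twox128(b"Balances") ++ ...`.
#[pallet::storage]
#[pallet::getter(fn balances)]
pub type Balances<T: Config> =
	StorageMap<_, Blake2_128Concat, T::AccountId, u32, ValueQuery>;

// Named generics: every argument is spelled out by name and no `_` placeholder is used.
// `#[pallet::storage_prefix]` keeps the old on-chain prefix `Twox128(b"Score")`.
#[pallet::storage]
#[pallet::storage_prefix = "Score"]
pub type LegacyScore<T: Config> = StorageMap<
	Hasher = Blake2_128Concat,
	Key = T::AccountId,
	Value = u32,
	QueryKind = OptionQuery,
>;
```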
-/// -/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix` -/// also implements -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`). -/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix'), which is -/// implemented the same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. -/// if runtime names the pallet "MyExample" then the storage `type Foo = -/// CountedStorageaMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++ -/// Twox128(b"CounterForFoo")`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ -/// Twox128(b"OtherName")`. -/// -/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`) -/// -/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional) -/// -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a -/// getter function on `Pallet`. -/// -/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`) -/// -/// ## `#[pallet::storage_prefix = "SomeName"]` (optional) -/// -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the -/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if -/// you wish to rename the storage field but don't want to perform a migration. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::storage_prefix = "foo"] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap; -/// ``` -/// -/// or -/// -/// ```ignore -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; -/// ``` -/// -/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`) -/// -/// ## `#[pallet::unbounded]` (optional) -/// -/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When -/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on -/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This -/// can be useful for storage which can never go into PoV (Proof of Validity). -/// -/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`) -/// -/// ## `#[pallet::whitelist_storage]` (optional) -/// -/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as -/// whitelisted from benchmarking. -/// -/// See -/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) -/// for more info. -/// -/// ## `#[pallet::disable_try_decode_storage]` (optional) -/// -/// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the storage as -/// whitelisted state decoding during try-runtime logic. -/// -/// See -/// [`pallet::disable_try_decode_storage`](frame_support::pallet_macros::disable_try_decode_storage) -/// for more info. -/// -/// ## `#[cfg(..)]` (for storage) -/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. -/// -/// E.g: -/// -/// ```ignore -/// #[cfg(feature = "my-feature")] -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue; -/// ``` -/// -/// All the `cfg` attributes are automatically copied to the items generated for the storage, -/// i.e. 
the getter, storage prefix, and the metadata element etc. -/// -/// Any type placed as the `QueryKind` parameter must implement -/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this -/// trait by default: -/// -/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind` -/// used when this type parameter is omitted. Specifying this as the `QueryKind` would cause -/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning -/// `Some` when a value does exist under a specified storage key, and `None` otherwise. -/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that -/// return a `QueryKind` to instead return the value type. In cases where a value does not -/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is -/// used to return an appropriate value. -/// 3. [`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs -/// that return a `QueryKind` to instead return a `Result`, with `T` being the value -/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute. -/// In cases where a value does not exist under a specified storage key, an `Err` with the -/// specified pallet error variant is returned. -/// -/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some -/// type alias then the generation of the getter might fail. In this case the getter can be -/// implemented manually. -/// -/// NOTE: The generic `Hasher` must implement the [`StorageHasher`] trait (or the type is not -/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// storage item. Thus generic hasher is supported. -/// -/// ## Macro expansion -/// -/// For each storage item the macro generates a struct named -/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements -/// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It -/// then uses it as the first generic of the aliased type. For -/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`), -/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`) -/// is implemented, and another similar struct is generated. -/// -/// For a named generic, the macro will reorder the generics, and remove the names. -/// -/// The macro implements the function `storage_metadata` on the `Pallet` implementing the -/// metadata for all storage items based on their kind: -/// * for a storage value, the type of the value is copied into the metadata -/// * for a storage map, the type of the values and the key's type is copied into the metadata -/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are -/// copied into the metadata. -/// -/// # Type value: `#[pallet::type_value]` (optional) -/// -/// The `#[pallet::type_value]` attribute lets you define a struct implementing the -/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to -/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a -/// storage's default value. This attribute can be used multiple times. 
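A side-by-side sketch of the three `QueryKind` choices described above (storage and error names are illustrative; the `ResultQuery` line assumes an `Error<T>` enum with a `NonExistent` variant):

```rust
// `OptionQuery` (the default): `get()` returns `Option<u32>`.
#[pallet::storage]
pub type MaybeValue<T> = StorageValue<_, u32, OptionQuery>;

// `ValueQuery`: `get()` returns `u32`, falling back to `OnEmpty` (here `u32::default()`, i.e. 0).
#[pallet::storage]
pub type PlainValue<T> = StorageValue<_, u32, ValueQuery>;

// `ResultQuery`: `get()` returns `Result<u32, Error<T>>`, yielding the named variant when empty.
#[pallet::storage]
pub type CheckedValue<T> = StorageValue<_, u32, ResultQuery<Error<T>::NonExistent>>;
```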
-/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } -/// ``` -/// -/// I.e.: a function definition with generics none or `T: Config` and a returned type. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::type_value] -/// fn MyDefault() -> T::Balance { 3.into() } -/// ``` -/// -/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`) -/// -/// # Genesis config: `#[pallet::genesis_config]` (optional) -/// -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. -/// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait [`BuildGenesisConfig`](`traits::BuildGenesisConfig`) with -/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type -/// generics are constrained to be either none, or `T` or `T: Config`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::genesis_config] -/// pub struct GenesisConfig { -/// _myfield: BalanceOf, -/// } -/// ``` -/// -/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`) -/// -/// # Genesis build: `#[pallet::genesis_build]` (optional) -/// -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the -/// pallet's initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`) -/// -/// # Inherent: `#[pallet::inherent]` (optional) -/// -/// The `#[pallet::inherent]` attribute allows the pallet to provide some -/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). -/// An inherent is some piece of data that is inserted by a block authoring node at block -/// creation time and can either be accepted or rejected by validators based on whether the -/// data falls within an acceptable range. -/// -/// The most common inherent is the `timestamp` that is inserted into every block. Since there -/// is no way to validate timestamps, validators simply check that the timestamp reported by -/// the block authoring node falls within an acceptable range. -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet`, and some -/// optional where clause. 
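For the `#[pallet::genesis_config]` / `#[pallet::genesis_build]` pair described above, a sketch; the field and storage names are invented, and the `PhantomData` field with `#[serde(skip)]` is the usual way to keep the struct generic over `T`:

```rust
#[pallet::genesis_config]
#[derive(frame_support::DefaultNoBound)]
pub struct GenesisConfig<T: Config> {
	/// Value written into storage at genesis.
	pub initial_value: u32,
	#[serde(skip)]
	pub _phantom: core::marker::PhantomData<T>,
}

#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
	fn build(&self) {
		// `Value` is assumed to be a `StorageValue` declared elsewhere in the pallet.
		Value::<T>::put(self.initial_value);
	}
}
```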
-/// -/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`) -/// -/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional) -/// -/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned -/// transaction: -/// -/// Item must be defined as: -/// -/// ```ignore -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// // ... regular trait implementation -/// } -/// ``` -/// -/// I.e. a trait implementation with bound `T: Config`, of trait -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet`, and some -/// optional where clause. -/// -/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to -/// add some specific logic for transaction validation. -/// -/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`) -/// -/// # Origin: `#[pallet::origin]` (optional) -/// -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. -/// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. -/// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. -/// -/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) -/// -/// # Composite enum `#[pallet::composite_enum]` (optional) -/// -/// The `#[pallet::composite_enum]` attribute allows you to define an enum on the pallet which -/// will then instruct `construct_runtime` to amalgamate all similarly-named enums from other -/// pallets into an aggregate enum. This is similar in principle with how the aggregate enum is -/// generated for `#[pallet::event]` or `#[pallet::error]`. -/// -/// The item tagged with `#[pallet::composite_enum]` MUST be an enum declaration, and can ONLY -/// be the following identifiers: `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. -/// Custom identifiers are not supported. -/// -/// NOTE: For ease of usage, when no `#[derive]` attributes are detected, the -/// `#[pallet::composite_enum]` attribute will automatically derive the following traits for -/// the enum: -/// -/// ```ignore -/// Copy, Clone, Eq, PartialEq, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug -/// ``` -/// -/// The inverse is also true: if there are any #[derive] attributes present for the enum, then -/// the attribute will not automatically derive any of the traits described above. -/// -/// # General notes on instantiable pallets -/// -/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allows -/// runtime to implement multiple instances of the pallet, by using different types for the -/// generic. This is the sole purpose of the generic `I`, but because -/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is -/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used. -/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an -/// instance, it is important to bound by `= ()` on every type. 
-/// -/// Thus impl bound looks like `impl, I: 'static>`, and types look like -/// `SomeType` or `SomeType, I: 'static = ()>`. -/// -/// # Example of a non-instantiable pallet -/// -/// ``` -/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` -/// -/// #[frame_support::pallet] -/// // NOTE: The name of the pallet is provided by `construct_runtime` and is used as -/// // the unique identifier for the pallet's storage. It is not defined in the pallet itself. -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; // Import various types used in the pallet definition -/// use frame_system::pallet_prelude::*; // Import some system helper types. -/// -/// type BalanceOf = ::Balance; -/// -/// // Define the generic parameter of the pallet -/// // The macro parses `#[pallet::constant]` attributes and uses them to generate metadata -/// // for the pallet's constants. -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] // put the constant in metadata -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// // Define some additional constant to put into the constant metadata. -/// #[pallet::extra_constants] -/// impl Pallet { -/// /// Some description -/// fn exra_constant_name() -> u128 { 4u128 } -/// } -/// -/// // Define the pallet struct placeholder, various pallet function are implemented on it. -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(_); -/// -/// // Implement the pallet hooks. -/// #[pallet::hooks] -/// impl Hooks> for Pallet { -/// fn on_initialize(_n: BlockNumberFor) -> Weight { -/// unimplemented!(); -/// } -/// -/// // can implement also: on_finalize, on_runtime_upgrade, offchain_worker, ... -/// // see `Hooks` trait -/// } -/// -/// // Declare Call struct and implement dispatchables. -/// // -/// // WARNING: Each parameter used in functions must implement: Clone, Debug, Eq, PartialEq, -/// // Codec. -/// // -/// // The macro parses `#[pallet::compact]` attributes on function arguments and implements -/// // the `Call` encoding/decoding accordingly. -/// #[pallet::call] -/// impl Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) -/// pub fn toto( -/// origin: OriginFor, -/// #[pallet::compact] _foo: u32, -/// ) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// // Declare the pallet `Error` enum (this is optional). -/// // The macro generates error metadata using the doc comment on each variant. -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// // Declare pallet Event enum (this is optional). -/// // -/// // WARNING: Each type used in variants must implement: Clone, Debug, Eq, PartialEq, Codec. -/// // -/// // The macro generates event metadata, and derive Clone, Debug, Eq, PartialEq and Codec -/// #[pallet::event] -/// // Generate a funciton on Pallet to deposit an event. -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event { -/// /// doc comment put in metadata -/// // `::AccountId` is not defined in metadata list, the last -/// // Thus the metadata is `::AccountId`. 
-/// Proposed(::AccountId), -/// /// doc -/// // here metadata will be `Balance` as define in metadata list -/// Spending(BalanceOf), -/// // here metadata will be `Other` as define in metadata list -/// Something(u32), -/// } -/// -/// // Define a struct which implements `frame_support::traits::Get` (optional). -/// #[pallet::type_value] -/// pub(super) fn MyDefault() -> T::Balance { 3.into() } -/// -/// // Declare a storage item. Any amount of storage items can be declared (optional). -/// // -/// // Is expected either `StorageValue`, `StorageMap` or `StorageDoubleMap`. -/// // The macro generates the prefix type and replaces the first generic `_`. -/// // -/// // The macro expands the metadata for the storage item with the type used: -/// // * for a storage value the type of the value is copied into the metadata -/// // * for a storage map the type of the values and the type of the key is copied into the metadata -/// // * for a storage double map the types of the values and keys are copied into the -/// // metadata. -/// // -/// // NOTE: The generic `Hasher` must implement the `StorageHasher` trait (or the type is not -/// // usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the -/// // storage item. Thus generic hasher is supported. -/// #[pallet::storage] -/// pub(super) type MyStorageValue = -/// StorageValue>; -/// -/// // Another storage declaration -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// // Declare the genesis config (optional). -/// // -/// // The macro accepts either a struct or an enum; it checks that generics are consistent. -/// // -/// // Type must implement the `Default` trait. -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig { -/// _config: sp_std::marker::PhantomData, -/// _myfield: u32, -/// } -/// -/// // Declare genesis builder. (This is need only if GenesisConfig is declared) -/// #[pallet::genesis_build] -/// impl BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// // Declare a pallet origin (this is optional). -/// // -/// // The macro accept type alias or struct or enum, it checks generics are consistent. -/// #[pallet::origin] -/// pub struct Origin(PhantomData); -/// -/// // Declare a hold reason (this is optional). -/// // -/// // Creates a hold reason for this pallet that is aggregated by `construct_runtime`. -/// // A similar enum can be defined for `FreezeReason`, `LockId` or `SlashReason`. -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// // Declare validate_unsigned implementation (this is optional). -/// #[pallet::validate_unsigned] -/// impl ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// // Declare inherent provider for pallet (this is optional). 
-/// #[pallet::inherent] -/// impl ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` -/// -/// # Example of an instantiable pallet -/// -/// ``` -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// -/// type BalanceOf = >::Balance; +/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not +/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not +/// specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. +/// * All storages are marked as unbounded, meaning you do not need to implement +/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is +/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, +/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` +/// can simply be ignored when in `dev_mode`. 
/// -/// #[pallet::config] -/// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type MyGetParam: Get; -/// type Balance: Parameter + MaxEncodedLen + From; -/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; -/// } -/// -/// #[pallet::extra_constants] -/// impl, I: 'static> Pallet { -/// /// Some description -/// fn extra_constant_name() -> u128 { 4u128 } -/// } -/// -/// #[pallet::pallet] -/// #[pallet::generate_store(pub(super) trait Store)] -/// pub struct Pallet(PhantomData<(T, I)>); -/// -/// #[pallet::hooks] -/// impl, I: 'static> Hooks> for Pallet { -/// } -/// -/// #[pallet::call] -/// impl, I: 'static> Pallet { -/// /// Doc comment put in metadata -/// #[pallet::weight(0)] -/// pub fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { -/// let _ = origin; -/// unimplemented!(); -/// } -/// } -/// -/// #[pallet::error] -/// pub enum Error { -/// /// doc comment put into metadata -/// InsufficientProposersBalance, -/// } -/// -/// #[pallet::event] -/// #[pallet::generate_deposit(pub(super) fn deposit_event)] -/// pub enum Event, I: 'static = ()> { -/// /// doc comment put in metadata -/// Proposed(::AccountId), -/// /// doc -/// Spending(BalanceOf), -/// Something(u32), -/// } -/// -/// #[pallet::type_value] -/// pub(super) fn MyDefault, I: 'static>() -> T::Balance { 3.into() } -/// -/// #[pallet::storage] -/// pub(super) type MyStorageValue, I: 'static = ()> = -/// StorageValue>; -/// -/// #[pallet::storage] -/// #[pallet::getter(fn my_storage)] -/// #[pallet::storage_prefix = "SomeOtherName"] -/// pub(super) type MyStorage = -/// StorageMap; -/// -/// #[pallet::genesis_config] -/// #[derive(frame_support::DefaultNoBound)] -/// pub struct GenesisConfig, I: 'static = ()> { -/// _config: sp_std::marker::PhantomData<(T,I)>, -/// _myfield: u32, -/// } -/// -/// #[pallet::genesis_build] -/// impl, I: 'static> BuildGenesisConfig for GenesisConfig { -/// fn build(&self) {} -/// } -/// -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T, I)>); -/// -/// #[pallet::composite_enum] -/// pub enum HoldReason { -/// SomeHoldReason -/// } -/// -/// #[pallet::validate_unsigned] -/// impl, I: 'static> ValidateUnsigned for Pallet { -/// type Call = Call; -/// fn validate_unsigned( -/// source: TransactionSource, -/// call: &Self::Call -/// ) -> TransactionValidity { -/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) -/// } -/// } -/// -/// #[pallet::inherent] -/// impl, I: 'static> ProvideInherent for Pallet { -/// type Call = Call; -/// type Error = InherentError; -/// -/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; -/// -/// fn create_inherent(_data: &InherentData) -> Option { -/// unimplemented!(); -/// } -/// -/// fn is_inherent(_call: &Self::Call) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// // Regular rust code needed for implementing ProvideInherent trait -/// -/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] -/// #[cfg_attr(feature = "std", derive(codec::Decode))] -/// pub enum InherentError { -/// } -/// -/// impl sp_inherents::IsFatalError for InherentError { -/// fn is_fatal_error(&self) -> bool { -/// unimplemented!(); -/// } -/// } -/// -/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; -/// } -/// ``` +/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or +/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. 
This +/// argument cannot be specified anywhere else, including but not limited to the +/// `#[pallet::pallet]` attribute macro. /// -/// # Upgrade guidelines -/// -/// 1. Export the metadata of the pallet for later checks -/// - run your node with the pallet active -/// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p -/// > meta.json` -/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the -/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p -/// my_pallet`. This template can be used as it contains all information for storages, -/// genesis config and genesis build. -/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one -/// file. Suggested order: -/// * `Config`, -/// * `decl_module`, -/// * `decl_event`, -/// * `decl_error`, -/// * `decl_storage`, -/// * `origin`, -/// * `validate_unsigned`, -/// * `provide_inherent`, so far it should compile and all be correct. -/// 4. start writing the new pallet module -/// ```ignore -/// pub use pallet::*; -/// -/// #[frame_support::pallet] -/// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// use super::*; -/// -/// #[pallet::pallet] -/// #[pallet::generate_store($visibility_of_trait_store trait Store)] -/// // NOTE: if the visibility of trait store is private but you want to make it available -/// // in super, then use `pub(super)` or `pub(crate)` to make it available in crate. -/// pub struct Pallet(_); -/// // pub struct Pallet(PhantomData); // for instantiable pallet -/// } -/// ``` -/// 5. **migrate Config**: move trait into the module with -/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant) -/// * add the bound `IsType<::RuntimeEvent>` to `type -/// RuntimeEvent` -/// 7. **migrate decl_module**: write: -/// ```ignore -/// #[pallet::hooks] -/// impl Hooks for Pallet { -/// } -/// ``` -/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`, -/// `offchain_worker`, and `integrity_test`. -/// -/// then write: -/// ```ignore -/// #[pallet::call] -/// impl Pallet { -/// } -/// ``` -/// and write inside all the calls in `decl_module` with a few changes in the signature: -/// - origin must now be written completely, e.g. `origin: OriginFor` -/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you -/// might need to put `Ok(().into())` at the end or the function. -/// - `#[compact]` must now be written -/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type) -/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr) -/// -/// 7. **migrate event**: rewrite as a simple enum with the attribute -/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis -/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`, -/// 8. **migrate error**: rewrite it with attribute -/// [`#[pallet::error]`](#error-palleterror-optional). -/// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, -/// genesis config, genesis build and default implementation of genesis config can be taken -/// from it directly. -/// -/// Otherwise here is the manual process: -/// -/// first migrate the genesis logic. 
write: -/// ```ignore -/// #[pallet::genesis_config] -/// struct GenesisConfig { -/// // fields of add_extra_genesis -/// } -/// impl Default for GenesisConfig { -/// // type default or default provided for fields -/// } -/// #[pallet::genesis_build] -/// impl GenesisBuild for GenesisConfig { -/// // for instantiable pallet: -/// // `impl GenesisBuild for GenesisConfig { -/// fn build() { -/// // The add_extra_genesis build logic -/// } -/// } -/// ``` -/// for each storage, if it contains `config(..)` then add fields, and make it default to -/// the value in `= ..;` or the type default if none, if it contains no build then also add -/// the logic to build the value. for each storage if it contains `build(..)` then add the -/// logic to `genesis_build`. -/// -/// NOTE: within `decl_storage`: the individual config is executed first, followed by the -/// build and finally the `add_extra_genesis` build. -/// -/// Once this is done you can migrate storages individually, a few notes: -/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, -/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn -/// ...)]`](#palletgetterfn-my_getter_fn_name-optional) -/// - for storages with value being `Option<$something>` make generic `Value` being -/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default). -/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`. -/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic. -/// To do so use of `#[pallet::type_value]` to generate the wanted struct to put. -/// example: `MyStorage: u32 = 3u32` would be written: -/// -/// ```ignore -/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } -/// #[pallet::storage] -/// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; -/// ``` -/// -/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and -/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests. -/// In order not to break they can be implemented manually, one can implement those -/// functions by calling the `GenesisBuild` implementation. -/// 10. **migrate origin**: move the origin to the pallet module to be under a -/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute -/// 11. **migrate validate_unsigned**: move the -/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet -/// module under a -/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// attribute -/// 12. **migrate provide_inherent**: move the -/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet -/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute -/// 13. rename the usage of `Module` to `Pallet` inside the crate. -/// 14. migration is done, now double check the migration with the checking migration -/// guidelines shown below. -/// -/// # Checking upgrade guidelines: -/// -/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata -/// and do a diff of the resulting json before and after migration. 
This checks for: -/// * call, names, signature, docs -/// * event names, docs -/// * error names, docs -/// * storage names, hasher, prefixes, default value -/// * error, error, constant -/// * manually check that: -/// * `Origin` was moved inside the macro under -/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists -/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro -/// under -/// [`#[pallet::validate_unsigned)]`](#validate-unsigned-palletvalidate_unsigned-optional) -/// if it exists -/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro -/// under [`#[pallet::inherent)]`](#inherent-palletinherent-optional) if it exists -/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved -/// to the `Hooks` implementation -/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default -/// is `= $expr;` if the storage has a default value -/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build` -/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct -/// default if specified -/// * `add_extra_genesis` build was written into `GenesisBuild::build` -/// * storage items defined with [`pallet`] use the name of the pallet provided by -/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used -/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus -/// a runtime using the pallet must be careful with this change. To handle this change: -/// * either ensure that the name of the pallet given to `construct_runtime!` is the same -/// as the name the pallet was giving to `decl_storage`, -/// * or do a storage migration from the old prefix used to the new prefix used. -/// -/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata -/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed. -/// -/// # Notes when macro fails to show proper error message spans: -/// -/// Rustc loses span for some macro input. Some tips to fix it: -/// * do not use inner attribute: -/// ```ignore -/// #[pallet] -/// pub mod pallet { -/// //! This inner attribute will make span fail -/// .. -/// } -/// ``` -/// * use the newest nightly possible. +///
+/// WARNING:
+/// You should never deploy or use dev mode pallets in production, as doing so can break
+/// your chain. Once you are done tinkering, remove the `dev_mode` argument from your
+/// `#[pallet]` declaration and fix any compile errors before attempting to use your
+/// pallet in a production scenario.
+/// 
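For illustration, a minimal pallet using `dev_mode` might look like the following sketch (assuming the standard `frame_support`/`frame_system` preludes and `sp_std`; `MyMap` and `do_something` are placeholder names used only for this example):

```rust
// A minimal dev mode pallet: call weights and call indices are omitted, the storage
// hasher is left as `_`, and the `Vec<u8>` storage value needs no `MaxEncodedLen`,
// all of which is accepted only because of `dev_mode`.
#[frame_support::pallet(dev_mode)]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;
    use sp_std::prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // The `_` hasher is replaced by `Blake2_128Concat` in dev mode, and the storage is
    // implicitly treated as `#[pallet::unbounded]`.
    #[pallet::storage]
    pub type MyMap<T> = StorageMap<_, _, u32, Vec<u8>>;

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        // No `#[pallet::weight]` or `#[pallet::call_index]` attribute is required here.
        pub fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResult {
            let _who = ensure_signed(origin)?;
            MyMap::<T>::insert(value, Vec::new());
            Ok(())
        }
    }
}
```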
pub use frame_support_procedural::pallet; -/// Contains macro stubs for all of the pallet:: macros -pub mod pallet_macros { - pub use frame_support_procedural::{ - composite_enum, config, disable_frame_system_supertrait_check, disable_try_decode_storage, - error, event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, - import_section, inherent, no_default, no_default_bounds, pallet_section, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, - }; +/// Contains macro stubs for all of the `pallet::` macros +pub mod pallet_macros { + /// Declare the storage as whitelisted from benchmarking. + /// + /// Doing so will exclude reads of that value's storage key from counting towards weight + /// calculations during benchmarking. + /// + /// This attribute should only be attached to storages that are known to be + /// read/used in every block. This will result in a more accurate benchmarking weight. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::whitelist_storage] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::whitelist_storage; + + /// Allows specifying the weight of a call. + /// + /// Each dispatchable needs to define a weight with the `#[pallet::weight($expr)]` + /// attribute. The first argument must be `origin: OriginFor`. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] + /// impl Pallet { + /// #[pallet::weight({0})] // <- set actual weight here + /// #[pallet::call_index(0)] + /// pub fn something( + /// _: OriginFor, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::weight; + + /// Allows whitelisting a storage item from decoding during try-runtime checks. + /// + /// The optional attribute `#[pallet::disable_try_decode_storage]` will declare the + /// storage as whitelisted from decoding during try-runtime checks. This should only be + /// attached to transient storage which cannot be migrated during runtime upgrades. + /// + /// ### Example + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::disable_try_decode_storage] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::disable_try_decode_storage; + + /// Declares a storage as unbounded in potential size. + /// + /// When implementating the storage info (when `#[pallet::generate_storage_info]` is + /// specified on the pallet struct placeholder), the size of the storage will be declared + /// as unbounded. This can be useful for storage which can never go into PoV (Proof of + /// Validity). 
+ /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::unbounded] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::unbounded; + + /// Defines what storage prefix to use for a storage item when building the trie. + /// + /// This is helpful if you wish to rename the storage field but don't want to perform a + /// migration. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::storage_prefix = "foo"] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + pub use frame_support_procedural::storage_prefix; + + /// Ensures the generated `DefaultConfig` will not have any bounds for + /// that trait item. + /// + /// Attaching this attribute to a trait item ensures that the generated trait + /// `DefaultConfig` will not have any bounds for this trait item. + /// + /// As an example, if you have a trait item `type AccountId: SomeTrait;` in your `Config` + /// trait, the generated `DefaultConfig` will only have `type AccountId;` with no trait + /// bound. + pub use frame_support_procedural::no_default_bounds; + + /// Ensures the trait item will not be used as a default with the + /// `#[derive_impl(..)]` attribute macro. + /// + /// The optional attribute `#[pallet::no_default]` can be attached to trait items within a + /// `Config` trait impl that has [`#[pallet::config(with_default)]`](`config`) + /// attached. + pub use frame_support_procedural::no_default; + + /// Declares a module as importable into a pallet via + /// [`#[import_section]`](`import_section`). + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. Do not attempt to make + /// use of `use` statements to bring pallet sections into scope, as this will not work + /// (unless you do so as part of a wildcard import, in which case it will work). + /// + /// ## Naming Logistics + /// + /// Also note that because of how `#[pallet_section]` works, pallet section names must be + /// globally unique _within the crate in which they are defined_. For more information on + /// why this must be the case, see macro_magic's + /// [`#[export_tokens]`](https://docs.rs/macro_magic/latest/macro_magic/attr.export_tokens.html) macro. + /// + /// Optionally, you may provide an argument to `#[pallet_section]` such as + /// `#[pallet_section(some_ident)]`, in the event that there is another pallet section in + /// same crate with the same ident/name. The ident you specify can then be used instead of + /// the module's ident name when you go to import it via + /// [`#[import_section]`](`import_section`). + pub use frame_support_procedural::pallet_section; + + /// The `#[pallet::inherent]` attribute allows the pallet to provide + /// [inherents](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). 
+ /// + /// An inherent is some piece of data that is inserted by a block authoring node at block + /// creation time and can either be accepted or rejected by validators based on whether the + /// data falls within an acceptable range. + /// + /// The most common inherent is the `timestamp` that is inserted into every block. Since + /// there is no way to validate timestamps, validators simply check that the timestamp + /// reported by the block authoring node falls within an acceptable range. + /// + /// Example usage: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_support::inherent::IsFatalError; + /// # use sp_timestamp::InherentError; + /// # use sp_std::result; + /// # + /// // Example inherent identifier + /// pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; + /// + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::inherent] + /// impl ProvideInherent for Pallet { + /// type Call = Call; + /// type Error = InherentError; + /// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + /// + /// fn create_inherent(data: &InherentData) -> Option { + /// unimplemented!() + /// } + /// + /// fn check_inherent( + /// call: &Self::Call, + /// data: &InherentData, + /// ) -> result::Result<(), Self::Error> { + /// unimplemented!() + /// } + /// + /// fn is_inherent(call: &Self::Call) -> bool { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type + /// `Pallet`, and some optional where clause. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to `construct_runtime`. + pub use frame_support_procedural::inherent; + + /// Splits a pallet declaration into multiple parts. + /// + /// An attribute macro that can be attached to a module declaration. Doing so will + /// import the contents of the specified external pallet section that is defined + /// elsewhere using [`#[pallet_section]`](`pallet_section`). + /// + /// ## Example + /// ``` + /// # use frame_support::pallet_macros::pallet_section; + /// # use frame_support::pallet_macros::import_section; + /// # + /// /// A [`pallet_section`] that defines the events for a pallet. + /// /// This can later be imported into the pallet using [`import_section`]. + /// #[pallet_section] + /// mod events { + /// #[pallet::event] + /// #[pallet::generate_deposit(pub(super) fn deposit_event)] + /// pub enum Event { + /// /// Event documentation should end with an array that provides descriptive names for event + /// /// parameters. [something, who] + /// SomethingStored { something: u32, who: T::AccountId }, + /// } + /// } + /// + /// #[import_section(events)] + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config { + /// # type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// # } + /// } + /// ``` + /// + /// This will result in the contents of `some_section` being _verbatim_ imported into + /// the pallet above. 
Note that since the tokens for `some_section` are essentially + /// copy-pasted into the target pallet, you cannot refer to imports that don't also + /// exist in the target pallet, but this is easily resolved by including all relevant + /// `use` statements within your pallet section, so they are imported as well, or by + /// otherwise ensuring that you have the same imports on the target pallet. + /// + /// It is perfectly permissible to import multiple pallet sections into the same pallet, + /// which can be done by having multiple `#[import_section(something)]` attributes + /// attached to the pallet. + /// + /// Note that sections are imported by their module name/ident, and should be referred to + /// by their _full path_ from the perspective of the target pallet. + pub use frame_support_procedural::import_section; + + /// Allows defining getter functions on `Pallet` storage. + /// + /// ## Example + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// #[pallet::getter(fn my_getter_fn_name)] + /// pub type MyStorage = StorageValue<_, u32>; + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// See [`pallet::storage`](`frame_support::pallet_macros::storage`) for more info. + pub use frame_support_procedural::getter; + + /// Allows generating the `Store` trait for all storages. + /// + /// DEPRECATED: Will be removed, do not use. + /// See for more details. + /// + /// To generate a `Store` trait associating all storages, annotate your `Pallet` struct + /// with the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: + /// + /// ```ignore + /// #[pallet::pallet] + /// #[pallet::generate_store(pub(super) trait Store)] + /// pub struct Pallet(_); + /// ``` + /// More precisely, the `Store` trait contains an associated type for each storage. It is + /// implemented for `Pallet` allowing access to the storage from pallet struct. + /// + /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using + /// `::Foo`. + pub use frame_support_procedural::generate_store; + + /// Defines constants that are added to the constant field of + /// [`PalletMetadata`](frame_metadata::v15::PalletMetadata) struct for this pallet. + /// + /// Must be defined like: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # + /// #[pallet::extra_constants] + /// impl Pallet // $optional_where_clause + /// { + /// #[pallet::constant_name(SomeU32ConstantName)] + /// /// Some doc + /// fn some_u32_constant() -> u32 { + /// 100u32 + /// } + /// } + /// } + /// ``` + /// + /// I.e. a regular rust `impl` block with some optional where clause and functions with 0 + /// args, 0 generics, and some return type. + pub use frame_support_procedural::extra_constants; + + #[rustfmt::skip] + /// Allows bypassing the `frame_system::Config` supertrait check. + /// + /// To bypass the syntactic `frame_system::Config` supertrait check, use the attribute + /// `pallet::disable_frame_system_supertrait_check`. + /// + /// Note this bypass is purely syntactic, and does not actually remove the requirement that your + /// pallet implements `frame_system::Config`. 
When using this check, your config is still required to implement + /// `frame_system::Config` either via + /// - Implementing a trait that itself implements `frame_system::Config` + /// - Tightly coupling it with another pallet which itself implements `frame_system::Config` + /// + /// e.g. + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// trait OtherTrait: frame_system::Config {} + /// + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config] + /// #[pallet::disable_frame_system_supertrait_check] + /// pub trait Config: OtherTrait {} + /// } + /// ``` + /// + /// To learn more about supertraits, see the + /// [trait_based_programming](../../polkadot_sdk_docs/reference_docs/trait_based_programming/index.html) + /// reference doc. + pub use frame_support_procedural::disable_frame_system_supertrait_check; + + /// The mandatory attribute allowing definition of configurable types for the pallet. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config // + $optionally_some_other_supertraits + /// // $optional_where_clause + /// { + /// // config items here + /// } + /// } + /// ``` + /// + /// I.e. a regular trait definition named `Config`, with the supertrait + /// [`frame_system::pallet::Config`](../../frame_system/pallet/trait.Config.html), and + /// optionally other supertraits and a where clause. (Specifying other supertraits here is + /// known as [tight coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) + /// + /// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds + /// `From` and `IsType<::RuntimeEvent>`. + /// + /// [`#[pallet::event]`](`event`) must be present if `RuntimeEvent` + /// exists as a config item in your `#[pallet::config]`. + /// + /// ## Optional: `with_default` + /// + /// An optional `with_default` argument may also be specified. Doing so will automatically + /// generate a `DefaultConfig` trait inside your pallet which is suitable for use with + /// [`#[derive_impl(..)`](`frame_support::derive_impl`) to derive a default testing + /// config: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # use core::fmt::Debug; + /// # use frame_support::traits::Contains; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config(with_default)] // <- with_default is optional + /// pub trait Config: frame_system::Config { + /// /// The overarching event type. + /// #[pallet::no_default_bounds] // Default is not supported for RuntimeEvent + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// + /// // ...other config items get default + /// } + /// + /// #[pallet::event] + /// pub enum Event { + /// SomeEvent(u16, u32), + /// } + /// } + /// ``` + /// + /// As shown above, you may also attach the [`#[pallet::no_default]`](`no_default`) + /// attribute to specify that a particular trait item _cannot_ be used as a default when a + /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) + /// attribute macro. 
This will cause that particular trait item to simply not appear in + /// default testing configs based on this config (the trait item will not be included in + /// `DefaultConfig`). + /// + /// ### `DefaultConfig` Caveats + /// + /// The auto-generated `DefaultConfig` trait: + /// - is always a _subset_ of your pallet's `Config` trait. + /// - can only contain items that don't rely on externalities, such as + /// `frame_system::Config`. + /// + /// Trait items that _do_ rely on externalities should be marked with + /// [`#[pallet::no_default]`](`no_default`) + /// + /// Consequently: + /// - Any items that rely on externalities _must_ be marked with + /// [`#[pallet::no_default]`](`no_default`) or your trait will fail to compile when used + /// with [`derive_impl`](`frame_support::derive_impl`). + /// - Items marked with [`#[pallet::no_default]`](`no_default`) are entirely excluded from + /// the `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to + /// implement such items. + /// + /// For more information, see [`frame_support::derive_impl`]. + pub use frame_support_procedural::config; + + /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. + /// + /// The `#[pallet::composite_enum]` attribute allows you to define an enum that gets + /// composed as an aggregate enum by `construct_runtime`. This is similar in principle with + /// [frame_support_procedural::event] and [frame_support_procedural::error]. + /// + /// The attribute currently only supports enum definitions, and identifiers that are named + /// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the + /// enum are not supported. The aggregate enum generated by + /// [`frame_support::construct_runtime`](frame_support::construct_runtime) will have the + /// name of `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and + /// `RuntimeSlashReason` respectively. + /// + /// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion + /// function from the pallet enum to the aggregate enum, and automatically derives the + /// following traits: + /// + /// ```ignore + /// Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, MaxEncodedLen, TypeInfo, + /// RuntimeDebug + /// ``` + /// + /// For ease of usage, when no `#[derive]` attributes are found for the enum under + /// [`#[pallet::composite_enum]`](composite_enum), the aforementioned traits are + /// automatically derived for it. The inverse is also true: if there are any `#[derive]` + /// attributes found for the enum, then no traits will automatically be derived for it. + /// + /// e.g, defining `HoldReason` in a pallet + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::composite_enum] + /// pub enum HoldReason { + /// /// The NIS Pallet has reserved it for a non-fungible receipt. + /// #[codec(index = 0)] + /// SomeHoldReason, + /// #[codec(index = 1)] + /// SomeOtherHoldReason, + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + pub use frame_support_procedural::composite_enum; + + /// Allows the pallet to validate unsigned transactions. 
+ /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::validate_unsigned] + /// impl sp_runtime::traits::ValidateUnsigned for Pallet { + /// type Call = Call; + /// + /// fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { + /// // Your implementation details here + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// I.e. a trait implementation with bound `T: Config`, of trait + /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for + /// type `Pallet`, and some optional where clause. + /// + /// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used + /// to add some specific logic for transaction validation. + /// + /// ## Macro expansion + /// + /// The macro currently makes no use of this information, but it might use this information + /// in the future to give information directly to [`frame_support::construct_runtime`]. + pub use frame_support_procedural::validate_unsigned; + + /// Allows defining a struct implementing the [`Get`](frame_support::traits::Get) trait to + /// ease the use of storage types. + /// + /// This attribute is meant to be used alongside [`#[pallet::storage]`](`storage`) to + /// define a storage's default value. This attribute can be used multiple times. + /// + /// Item must be defined as: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use sp_runtime::FixedU128; + /// # use frame_support::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::storage] + /// pub(super) type SomeStorage = + /// StorageValue<_, FixedU128, ValueQuery, DefaultForSomeValue>; + /// + /// // Define default for ParachainId + /// #[pallet::type_value] + /// pub fn DefaultForSomeValue() -> FixedU128 { + /// FixedU128::from_u32(1) + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// ## Macro expansion + /// + /// The macro renames the function to some internal name, generates a struct with the + /// original name of the function and its generic, and implements `Get<$ReturnType>` by + /// calling the user defined function. + pub use frame_support_procedural::type_value; + + /// Allows defining a storage version for the pallet. + /// + /// Because the `pallet::pallet` macro implements + /// [`GetStorageVersion`](frame_support::traits::GetStorageVersion), the current storage + /// version needs to be communicated to the macro. This can be done by using the + /// `pallet::storage_version` attribute: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::StorageVersion; + /// # use frame_support::traits::GetStorageVersion; + /// # + /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); + /// + /// #[pallet::pallet] + /// #[pallet::storage_version(STORAGE_VERSION)] + /// pub struct Pallet(_); + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// If not present, the current storage version is set to the default value. 
+ pub use frame_support_procedural::storage_version; + + /// The `#[pallet::hooks]` attribute allows you to specify a + /// [`frame_support::traits::Hooks`] implementation for `Pallet` that specifies + /// pallet-specific logic. + /// + /// The item the attribute attaches to must be defined as follows: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::hooks] + /// impl Hooks> for Pallet { + /// // Implement hooks here + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait + /// `Hooks>` (they are defined in preludes), for the type `Pallet`. + /// + /// Optionally, you could add a where clause. + /// + /// ## Macro expansion + /// + /// The macro implements the traits + /// [`OnInitialize`](frame_support::traits::OnInitialize), + /// [`OnIdle`](frame_support::traits::OnIdle), + /// [`OnFinalize`](frame_support::traits::OnFinalize), + /// [`OnRuntimeUpgrade`](frame_support::traits::OnRuntimeUpgrade), + /// [`OffchainWorker`](frame_support::traits::OffchainWorker), and + /// [`IntegrityTest`](frame_support::traits::IntegrityTest) using + /// the provided [`Hooks`](frame_support::traits::Hooks) implementation. + /// + /// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some + /// additional logic. E.g. logic to write the pallet version into storage. + /// + /// NOTE: The macro also adds some tracing logic when implementing the above traits. The + /// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. + pub use frame_support_procedural::hooks; + + /// Generates a helper function on `Pallet` that handles deposit events. + /// + /// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. + /// + /// ## Macro expansion + /// + /// The macro will add on enum `Event` the attributes: + /// * `#[derive(`[`frame_support::CloneNoBound`]`)]` + /// * `#[derive(`[`frame_support::EqNoBound`]`)]` + /// * `#[derive(`[`frame_support::PartialEqNoBound`]`)]` + /// * `#[derive(`[`frame_support::RuntimeDebugNoBound`]`)]` + /// * `#[derive(`[`codec::Encode`]`)]` + /// * `#[derive(`[`codec::Decode`]`)]` + /// + /// The macro implements `From>` for (). + /// + /// The macro implements a metadata function on `Event` returning the `EventMetadata`. + /// + /// If `#[pallet::generate_deposit]` is present then the macro implements `fn + /// deposit_event` on `Pallet`. + pub use frame_support_procedural::generate_deposit; + + /// Allows defining logic to make an extrinsic call feeless. + /// + /// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` + /// attribute, which explicitly defines the condition for the dispatchable to be feeless. + /// + /// The arguments for the closure must be the referenced arguments of the dispatchable + /// function. + /// + /// The closure must return `bool`. + /// + /// ### Example + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] + /// impl Pallet { + /// #[pallet::call_index(0)] + /// /// Marks this call as feeless if `foo` is zero. 
+ /// #[pallet::feeless_if(|_origin: &OriginFor, foo: &u32| -> bool { + /// *foo == 0 + /// })] + /// pub fn something( + /// _: OriginFor, + /// foo: u32, + /// ) -> DispatchResult { + /// unimplemented!() + /// } + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// + /// Please note that this only works for signed dispatchables and requires a signed + /// extension such as [`pallet_skip_feeless_payment::SkipCheckIfFeeless`] to wrap the + /// existing payment extension. Else, this is completely ignored and the dispatchable is + /// still charged. + /// + /// ### Macro expansion + /// + /// The macro implements the [`pallet_skip_feeless_payment::CheckIfFeeless`] trait on the + /// dispatchable and calls the corresponding closure in the implementation. + /// + /// [`pallet_skip_feeless_payment::SkipCheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + /// [`pallet_skip_feeless_payment::CheckIfFeeless`]: ../../pallet_skip_feeless_payment/struct.SkipCheckIfFeeless.html + pub use frame_support_procedural::feeless_if; + + /// Allows defining an error enum that will be returned from the dispatchable when an error + /// occurs. + /// + /// The information for this error type is then stored in runtime metadata. + /// + /// Item must be defined as so: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::error] + /// pub enum Error { + /// /// SomeFieldLessVariant doc + /// SomeFieldLessVariant, + /// /// SomeVariantWithOneField doc + /// SomeVariantWithOneField(u32), + /// } + /// # + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// } + /// ``` + /// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field + /// variants. + /// + /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to + /// be properly used in the metadata, and its encoded size should be as small as possible, + /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an + /// absolute maximum encoded size specified by + /// [`frame_support::MAX_MODULE_ERROR_ENCODED_SIZE`]. + /// + /// (1 byte can still be 256 different errors. The more specific the error, the easier it + /// is to diagnose problems and give a better experience to the user. Don't skimp on having + /// lots of individual error conditions.) + /// + /// Field types in enum variants must also implement [`frame_support::PalletError`], + /// otherwise the pallet will fail to compile. Rust primitive types have already + /// implemented the [`frame_support::PalletError`] trait along with some commonly used + /// stdlib types such as [`Option`] and [`sp_std::marker::PhantomData`], and hence + /// in most use cases, a manual implementation is not necessary and is discouraged. + /// + /// The generic `T` must not bound anything and a `where` clause is not allowed. That said, + /// bounds and/or a where clause should not needed for any use-case. + /// + /// ## Macro expansion + /// + /// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, + /// and `as_str` using variant doc. + /// + /// The macro also implements `From>` for `&'static str` and `From>` for + /// `DispatchError`. + pub use frame_support_procedural::error; + + /// Allows defining pallet events. 
+ /// + /// Pallet events are stored under the `system` / `events` key when the block is applied + /// (and then replaced when the next block writes it's events). + /// + /// The Event enum can be defined as follows: + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// # use frame_support::pallet_prelude::IsType; + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::event] + /// #[pallet::generate_deposit(fn deposit_event)] // Optional + /// pub enum Event { + /// /// SomeEvent doc + /// SomeEvent(u16, u32), // SomeEvent with two fields + /// } + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config { + /// /// The overarching runtime event type. + /// type RuntimeEvent: From> + /// + IsType<::RuntimeEvent>; + /// } + /// } + /// ``` + /// + /// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none + /// or `T` or `T: Config`, and optional w here clause. + /// + /// `RuntimeEvent` must be defined in the `Config`, as shown in the example. + /// + /// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`codec::Encode`], + /// [`codec::Decode`], and [`Debug`] (on std only). For ease of use, bound by the trait + /// `Member`, available in [`frame_support::pallet_prelude`]. + pub use frame_support_procedural::event; - /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In - /// slightly simplified terms, this macro declares the set of "transactions" of a pallet. + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. + /// + /// In slightly simplified terms, this macro declares the set of "transactions" of a + /// pallet. /// /// > The exact definition of **extrinsic** can be found in /// > [`sp_runtime::generic::UncheckedExtrinsic`]. @@ -2391,14 +2003,24 @@ pub mod pallet_macros { /// If no `#[pallet::call]` exists, then a default implementation corresponding to the /// following code is automatically generated: /// - /// ```ignore - /// #[pallet::call] - /// impl Pallet {} + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// mod pallet { + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::call] // <- automatically generated + /// impl Pallet {} // <- automatically generated + /// + /// #[pallet::config] + /// pub trait Config: frame_system::Config {} + /// } /// ``` pub use frame_support_procedural::call; - /// Enforce the index of a variant in the generated `enum Call`. See [`call`] for more - /// information. + /// Enforce the index of a variant in the generated `enum Call`. + /// + /// See [`call`] for more information. /// /// All call indexes start from 0, until it encounters a dispatchable function with a /// defined call index. The dispatchable function that lexically follows the function with @@ -2408,7 +2030,9 @@ pub mod pallet_macros { pub use frame_support_procedural::call_index; /// Declares the arguments of a [`call`] function to be encoded using - /// [`codec::Compact`]. This will results in smaller extrinsic encoding. + /// [`codec::Compact`]. + /// + /// This will results in smaller extrinsic encoding. /// /// A common example of `compact` is for numeric values that are often times far far away /// from their theoretical maximum. 
For example, in the context of a crypto-currency, the @@ -2504,8 +2128,8 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::genesis_build; - /// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded - /// by [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) + /// Allows adding an associated type trait bounded by + /// [`Get`](frame_support::pallet_prelude::Get) from [`pallet::config`](`macro@config`) /// into metadata. /// /// ## Example @@ -2526,9 +2150,10 @@ pub mod pallet_macros { /// ``` pub use frame_support_procedural::constant; - /// Declares a type alias as a storage item. Storage items are pointers to data stored - /// on-chain (the *blockchain state*), under a specific key. The exact key is dependent on - /// the type of the storage. + /// Declares a type alias as a storage item. + /// + /// Storage items are pointers to data stored on-chain (the *blockchain state*), under a + /// specific key. The exact key is dependent on the type of the storage. /// /// > From the perspective of this pallet, the entire blockchain state is abstracted behind /// > a key-value api, namely [`sp_io::storage`]. @@ -2730,7 +2355,10 @@ pub mod pallet_macros { /// } /// ``` pub use frame_support_procedural::storage; - /// This attribute is attached to a function inside an `impl` block annotated with + + /// Allows defining conditions for a task to run. + /// + /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2738,29 +2366,39 @@ pub mod pallet_macros { /// should have the same signature as the function it is attached to, except that it should /// return a `bool` instead. pub use frame_support_procedural::task_condition; - /// This attribute is attached to a function inside an `impl` block annotated with + + /// Allows defining an index for a task. + /// + /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// /// It takes an integer literal as input, which is then used to define the index. This /// index should be unique for each function in the `impl` block. pub use frame_support_procedural::task_index; - /// This attribute is attached to a function inside an `impl` block annotated with - /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the - /// available work items for a task. + + /// Allows defining an iterator over available work items for a task. + /// + /// This attribute is attached to a function inside an `impl` block annoated with + /// [`pallet::tasks_experimental`](`tasks_experimental`). /// /// It takes an iterator as input that yields a tuple with same types as the function /// arguments. pub use frame_support_procedural::task_list; - /// This attribute is attached to a function inside an `impl` block annotated with + + /// Allows defining the weight of a task. + /// + /// This attribute is attached to a function inside an `impl` block annoated with /// [`pallet::tasks_experimental`](`tasks_experimental`) define the weight of a given work /// item. /// /// It takes a closure as input, which should return a `Weight` value. pub use frame_support_procedural::task_weight; + /// Allows you to define some service work that can be recognized by a script or an - /// off-chain worker. 
Such a script can then create and submit all such work items at any - /// given time. + /// off-chain worker. + /// + /// Such a script can then create and submit all such work items at any given time. /// /// These work items are defined as instances of the [`Task`](frame_support::traits::Task) /// trait. [`pallet:tasks_experimental`](`tasks_experimental`) when attached to an `impl` @@ -2814,7 +2452,7 @@ pub mod pallet_macros { /// } /// ``` /// - /// Or, more commonly used:/ + /// Or, more commonly used: /// /// ``` /// #[frame_support::pallet] @@ -2839,6 +2477,9 @@ pub mod pallet_macros { /// /// Modifying any pallet's origin type will cause the runtime level origin type to also /// change in encoding. If stored anywhere on-chain, this will require a data migration. + /// + /// Read more about origins at the [Origin Reference + /// Docs](../../polkadot_sdk_docs/reference_docs/frame_origin/index.html). pub use frame_support_procedural::origin; } -- GitLab From fd5f9292f500652e1d4792b09fb8ac60e1268ce4 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Mon, 4 Mar 2024 20:12:43 +0100 Subject: [PATCH 35/86] FRAME: Create `TransactionExtension` as a replacement for `SignedExtension` (#2280) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #2160 First part of [Extrinsic Horizon](https://github.com/paritytech/polkadot-sdk/issues/2415) Introduces a new trait `TransactionExtension` to replace `SignedExtension`. Introduce the idea of transactions which obey the runtime's extensions and have according Extension data (né Extra data) yet do not have hard-coded signatures. Deprecate the terminology of "Unsigned" when used for transactions/extrinsics owing to there now being "proper" unsigned transactions which obey the extension framework and "old-style" unsigned which do not. Instead we have __*General*__ for the former and __*Bare*__ for the latter. (Ultimately, the latter will be phased out as a type of transaction, and Bare will only be used for Inherents.) Types of extrinsic are now therefore: - Bare (no hardcoded signature, no Extra data; used to be known as "Unsigned") - Bare transactions (deprecated): Gossiped, validated with `ValidateUnsigned` (deprecated) and the `_bare_compat` bits of `TransactionExtension` (deprecated). - Inherents: Not gossiped, validated with `ProvideInherent`. - Extended (Extra data): Gossiped, validated via `TransactionExtension`. - Signed transactions (with a hardcoded signature). - General transactions (without a hardcoded signature). `TransactionExtension` differs from `SignedExtension` because: - A signature on the underlying transaction may validly not be present. - It may alter the origin during validation. - `pre_dispatch` is renamed to `prepare` and need not contain the checks present in `validate`. - `validate` and `prepare` is passed an `Origin` rather than a `AccountId`. - `validate` may pass arbitrary information into `prepare` via a new user-specifiable type `Val`. - `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`. It is encoded *for the entire transaction* and passed in to each extension as a new argument to `validate`. This facilitates the ability of extensions to acts as underlying crypto. There is a new `DispatchTransaction` trait which contains only default function impls and is impl'ed for any `TransactionExtension` impler. 
It provides several utility functions which reduce some of the tedium from using
`TransactionExtension` (indeed, none of its regular functions should now need to be called
directly).

Three transaction version discriminators ("versions") are now permissible:

- 0b000000100: Bare (used to be called "Unsigned"): contains no Signature or Extra (extension
  data). After bare transactions are no longer supported, this will strictly identify an Inherent
  only.
- 0b100000100: Old-school "Signed" Transaction: contains Signature and Extra (extension data).
- 0b010000100: New-school "General" Transaction: contains Extra (extension data), but no
  Signature.

For the New-school General Transaction, it becomes trivial for authors to publish extensions to
the mechanism for authorizing an Origin, e.g. through new kinds of key-signing schemes, ZK proofs,
pallet state, mutations over pre-authenticated origins or any combination of the above.

## Code Migration

### NOW: Getting it to build

Wrap your `SignedExtension`s in `AsTransactionExtension`. This should be accompanied by renaming
your aggregate type in line with the new terminology. E.g.

Before:

```rust
/// The SignedExtension to the basic transaction logic.
pub type SignedExtra = (
	/* snip */
	MySpecialSignedExtension,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic;
```

After:

```rust
/// The extension to the basic transaction logic.
pub type TxExtension = (
	/* snip */
	AsTransactionExtension<MySpecialSignedExtension>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic;
```

You'll also need to alter any transaction building logic to add a `.into()` to make the
conversion happen. E.g.

Before:

```rust
fn construct_extrinsic(
	/* snip */
) -> UncheckedExtrinsic {
	let extra: SignedExtra = (
		/* snip */
		MySpecialSignedExtension::new(/* snip */),
	);
	let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
	let signature = payload.using_encoded(|e| sender.sign(e));
	UncheckedExtrinsic::new_signed(
		/* snip */
		Signature::Sr25519(signature),
		extra,
	)
}
```

After:

```rust
fn construct_extrinsic(
	/* snip */
) -> UncheckedExtrinsic {
	let tx_ext: TxExtension = (
		/* snip */
		MySpecialSignedExtension::new(/* snip */).into(),
	);
	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
	let signature = payload.using_encoded(|e| sender.sign(e));
	UncheckedExtrinsic::new_signed(
		/* snip */
		Signature::Sr25519(signature),
		tx_ext,
	)
}
```

### SOON: Migrating to `TransactionExtension`

Most `SignedExtension`s can be trivially converted to become a `TransactionExtension`. There are
a few things to know.

- Instead of a single trait like `SignedExtension`, you should now implement two traits
  individually: `TransactionExtensionBase` and `TransactionExtension`.
- Weights are now a thing and must be provided via the new function `fn weight`.

#### `TransactionExtensionBase`

This trait takes care of anything which is not dependent on types specific to your runtime, most
notably `Call`.

- `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`.
- Weight must be returned by implementing the `weight` function. If your extension is associated
  with a pallet, you'll probably want to do this via the pallet's existing benchmarking
  infrastructure.

#### `TransactionExtension`

Generally:

- `pre_dispatch` is now `prepare` and you *should not reexecute the `validate` functionality
  in there*! (A minimal sketch of the new trait pair follows below.)
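As a rough illustration only, a do-nothing extension under the new trait pair can mirror the
pattern this PR applies to the Polkadot Bulletin chain extension further down in the diff.
`MyExtension` is a placeholder name, the derive list is abbreviated, and the default
`validate`/`prepare` bodies come from the `impl_tx_ext_default!` helper introduced here:

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{Dispatchable, TransactionExtension, TransactionExtensionBase},
};

/// Placeholder extension used purely for illustration; it performs no checks.
#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)]
pub struct MyExtension;

impl TransactionExtensionBase for MyExtension {
	const IDENTIFIER: &'static str = "MyExtension";
	// What used to be `AdditionalSigned`: data implicitly included in the signed payload.
	type Implicit = ();
	// `fn weight` is left at its default here; a real extension should provide it.
}

impl<Call: Dispatchable, Context> TransactionExtension<Call, Context> for MyExtension {
	// Ticket passed from `validate` into `prepare`.
	type Val = ();
	// Ticket passed from `prepare` into `post_dispatch`.
	type Pre = ();

	// Use the default no-op `validate`/`prepare`; a real extension implements them and
	// hands its working from the former to the latter via `Val`.
	impl_tx_ext_default!(Call; Context; validate prepare);
}
```

In most call sites you should not need to invoke `validate`/`prepare` yourself; the
`DispatchTransaction` helpers (`validate_only`, `validate_and_prepare`), as used in the updated
bridge tests below, take care of that plumbing.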
- You don't get an account ID any more; you get an origin instead. If you need to presume an
  account ID, then you can use the trait function `AsSystemOriginSigner::as_system_origin_signer`.
- You get an additional ticket, similar to `Pre`, called `Val`. This defines data which is passed
  from `validate` into `prepare`. This is important since you should not be duplicating logic from
  `validate` to `prepare`, you need a way of passing your working from the former into the latter.
  This is it.
- This trait takes two type parameters: `Call` and `Context`. `Call` is the runtime call type
  which used to be an associated type; you can just move it to become a type parameter for your
  trait impl. `Context` is not currently used and you can safely implement over it as an unbounded
  type.
- There's no `AccountId` associated type any more. Just remove it.

Regarding `validate`:

- You get three new parameters in `validate`; all can be ignored when migrating from
  `SignedExtension`.
- `validate` returns a tuple on success; the second item in the tuple is the new ticket type
  `Self::Val` which gets passed in to `prepare`. If you use any information extracted during
  `validate` (off-chain and on-chain, non-mutating) in `prepare` (on-chain, mutating) then you can
  pass it through with this. For the tuple's last item, just return the `origin` argument.

Regarding `prepare`:

- This is renamed from `pre_dispatch`, but there is one change:
- FUNCTIONALITY TO VALIDATE THE TRANSACTION NEED NOT BE DUPLICATED FROM `validate`!!
- (This is different to `SignedExtension` which was required to run the same checks in
  `pre_dispatch` as in `validate`.)

Regarding `post_dispatch`:

- Since there are no unsigned transactions handled by `TransactionExtension`, `Pre` is always
  defined, so the first parameter is `Self::Pre` rather than `Option<Self::Pre>`.

If you make use of `SignedExtension::validate_unsigned` or
`SignedExtension::pre_dispatch_unsigned`, then:

- Just use the regular versions of these functions instead.
- Have your logic execute in the case that the `origin` is `None`.
- Ensure your transaction creation logic creates a General Transaction rather than a Bare
  Transaction; this means having to include all `TransactionExtension`s' data.
- `ValidateUnsigned` can still be used (for now) if you need to be able to construct transactions
  which contain none of the extension data, however these will be phased out in stage 2 of the
  Transactions Horizon, so you should consider moving to an extension-centric design.

## TODO

- [x] Introduce `CheckSignature` impl of `TransactionExtension` to ensure it's possible to have
  crypto be done wholly in a `TransactionExtension`.
- [x] Deprecate `SignedExtension` and move all uses in codebase to `TransactionExtension`.
- [x] `ChargeTransactionPayment`
- [x] `DummyExtension`
- [x] `ChargeAssetTxPayment` (asset-tx-payment)
- [x] `ChargeAssetTxPayment` (asset-conversion-tx-payment)
- [x] `CheckWeight`
- [x] `CheckTxVersion`
- [x] `CheckSpecVersion`
- [x] `CheckNonce`
- [x] `CheckNonZeroSender`
- [x] `CheckMortality`
- [x] `CheckGenesis`
- [x] `CheckOnlySudoAccount`
- [x] `WatchDummy`
- [x] `PrevalidateAttests`
- [x] `GenericSignedExtension`
- [x] `SignedExtension` (chain-polkadot-bulletin)
- [x] `RefundSignedExtensionAdapter`
- [x] Implement `fn weight` across the board.
- [ ] Go through all pre-existing extensions which assume an account signer and explicitly handle
  the possibility of another kind of origin.
- [x] `CheckNonce` should probably succeed in the case of a non-account origin.
- [x] `CheckNonZeroSender` should succeed in the case of a non-account origin. - [x] `ChargeTransactionPayment` and family should fail in the case of a non-account origin. - [ ] - [x] Fix any broken tests. --------- Signed-off-by: georgepisaltu Signed-off-by: Alexandru Vasile Signed-off-by: dependabot[bot] Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Alexandru Gheorghe Signed-off-by: Andrei Sandu Co-authored-by: Nikhil Gupta <17176722+gupnik@users.noreply.github.com> Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Co-authored-by: Chevdor Co-authored-by: Bastian Kâcher Co-authored-by: Maciej Co-authored-by: Javier Viola Co-authored-by: Marcin S. Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Javier Bullrich Co-authored-by: Koute Co-authored-by: Adrian Catangiu Co-authored-by: Vladimir Istyufeev Co-authored-by: Ross Bulat Co-authored-by: Gonçalo Pestana Co-authored-by: Liam Aharon Co-authored-by: Svyatoslav Nikolsky Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: ordian Co-authored-by: Sebastian Kunert Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: Dmitry Markin Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Julian Eager Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Davide Galassi Co-authored-by: Dónal Murray Co-authored-by: yjh Co-authored-by: Tom Mi Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Will | Paradox | ParaNodes.io <79228812+paradox-tt@users.noreply.github.com> Co-authored-by: Bastian Kâcher Co-authored-by: Joshy Orndorff Co-authored-by: Joshy Orndorff Co-authored-by: PG Herveou Co-authored-by: Alexander Theißen Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Juan Girini Co-authored-by: bader y Co-authored-by: James Wilson Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: asynchronous rob Co-authored-by: Parth Co-authored-by: Andrew Jones Co-authored-by: Jonathan Udd Co-authored-by: Serban Iorga Co-authored-by: Egor_P Co-authored-by: Branislav Kontur Co-authored-by: Evgeny Snitko Co-authored-by: Just van Stam Co-authored-by: Francisco Aguirre Co-authored-by: gupnik Co-authored-by: dzmitry-lahoda Co-authored-by: zhiqiangxu <652732310@qq.com> Co-authored-by: Nazar Mokrynskyi Co-authored-by: Anwesh Co-authored-by: cheme Co-authored-by: Sam Johnson Co-authored-by: kianenigma Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Co-authored-by: Muharem Co-authored-by: joepetrowski Co-authored-by: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Co-authored-by: Gabriel Facco de Arruda Co-authored-by: Squirrel Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Co-authored-by: georgepisaltu Co-authored-by: command-bot <> --- Cargo.lock | 35 +- bridges/bin/runtime-common/Cargo.toml | 1 + bridges/bin/runtime-common/src/lib.rs | 72 +- bridges/bin/runtime-common/src/mock.rs | 2 +- .../runtime-common/src/priority_calculator.rs | 11 +- .../src/refund_relayer_extension.rs | 154 +- .../chain-bridge-hub-cumulus/src/lib.rs | 4 +- .../chain-bridge-hub-rococo/src/lib.rs | 2 +- 
bridges/primitives/chain-kusama/src/lib.rs | 4 +- .../chain-polkadot-bulletin/src/lib.rs | 51 +- bridges/primitives/chain-polkadot/src/lib.rs | 4 +- bridges/primitives/chain-rococo/src/lib.rs | 4 +- bridges/primitives/chain-westend/src/lib.rs | 4 +- bridges/primitives/polkadot-core/src/lib.rs | 37 +- bridges/primitives/runtime/src/extensions.rs | 135 +- .../snowbridge/runtime/test-common/Cargo.toml | 1 + cumulus/parachain-template/runtime/Cargo.toml | 2 + cumulus/parachain-template/runtime/src/lib.rs | 10 +- .../assets/asset-hub-rococo/Cargo.toml | 2 + .../assets/asset-hub-rococo/src/lib.rs | 77 +- .../src/weights/frame_system_extensions.rs | 121 + .../asset-hub-rococo/src/weights/mod.rs | 3 + .../pallet_asset_conversion_tx_payment.rs | 92 + .../src/weights/pallet_transaction_payment.rs | 67 + .../assets/asset-hub-westend/Cargo.toml | 2 + .../assets/asset-hub-westend/src/lib.rs | 77 +- .../src/weights/frame_system_extensions.rs | 121 + .../asset-hub-westend/src/weights/mod.rs | 3 + .../pallet_asset_conversion_tx_payment.rs | 92 + .../src/weights/pallet_transaction_payment.rs | 67 + .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 1 + .../src/bridge_to_bulletin_config.rs | 4 +- .../src/bridge_to_westend_config.rs | 4 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 27 +- .../src/weights/frame_system_extensions.rs | 121 + .../bridge-hub-rococo/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../bridge-hub-rococo/tests/snowbridge.rs | 8 +- .../bridge-hub-rococo/tests/tests.rs | 11 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 1 + .../src/bridge_to_rococo_config.rs | 4 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 25 +- .../src/weights/frame_system_extensions.rs | 121 + .../bridge-hub-westend/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../bridge-hub-westend/tests/tests.rs | 11 +- .../collectives-westend/Cargo.toml | 1 + .../collectives-westend/src/lib.rs | 12 +- .../src/weights/frame_system_extensions.rs | 121 + .../collectives-westend/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../contracts/contracts-rococo/Cargo.toml | 1 + .../contracts/contracts-rococo/src/lib.rs | 10 +- .../coretime/coretime-rococo/Cargo.toml | 1 + .../coretime/coretime-rococo/src/lib.rs | 9 +- .../src/weights/frame_system_extensions.rs | 121 + .../coretime-rococo/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../coretime/coretime-westend/Cargo.toml | 1 + .../coretime/coretime-westend/src/lib.rs | 9 +- .../src/weights/frame_system_extensions.rs | 121 + .../coretime-westend/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../glutton/glutton-westend/src/lib.rs | 9 +- .../src/weights/frame_system_extensions.rs | 119 + .../runtimes/people/people-rococo/Cargo.toml | 1 + .../runtimes/people/people-rococo/src/lib.rs | 9 +- .../src/weights/frame_system_extensions.rs | 121 + .../people/people-rococo/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../runtimes/people/people-westend/Cargo.toml | 1 + .../runtimes/people/people-westend/src/lib.rs | 8 +- .../src/weights/frame_system_extensions.rs | 121 + .../people/people-westend/src/weights/mod.rs | 2 + .../src/weights/pallet_transaction_payment.rs | 67 + .../runtimes/starters/seedling/src/lib.rs | 6 +- .../runtimes/starters/shell/src/lib.rs | 64 +- .../runtimes/testing/penpal/Cargo.toml | 1 + .../runtimes/testing/penpal/src/lib.rs | 26 +- .../testing/rococo-parachain/Cargo.toml | 1 + 
.../testing/rococo-parachain/src/lib.rs | 7 +- cumulus/polkadot-parachain/Cargo.toml | 1 + .../storage-weight-reclaim/Cargo.toml | 13 +- .../src/benchmarking.rs | 70 + .../storage-weight-reclaim/src/lib.rs | 522 +--- .../storage-weight-reclaim/src/tests.rs | 484 ++++ cumulus/test/client/Cargo.toml | 2 + cumulus/test/client/src/lib.rs | 13 +- cumulus/test/runtime/src/lib.rs | 9 +- cumulus/test/service/Cargo.toml | 2 + cumulus/test/service/src/bench_utils.rs | 4 +- cumulus/test/service/src/lib.rs | 9 +- .../src/reference_docs/extrinsic_encoding.rs | 101 +- .../src/reference_docs/frame_runtime_types.rs | 2 +- docs/sdk/src/reference_docs/mod.rs | 4 +- .../src/reference_docs/signed_extensions.rs | 79 - .../reference_docs/transaction_extensions.rs | 57 + polkadot/node/service/Cargo.toml | 1 + polkadot/node/service/src/benchmarking.rs | 18 +- polkadot/node/test/service/Cargo.toml | 1 + polkadot/node/test/service/src/lib.rs | 11 +- polkadot/runtime/common/Cargo.toml | 1 + polkadot/runtime/common/src/claims.rs | 150 +- polkadot/runtime/rococo/Cargo.toml | 1 + .../constants/src/weights/block_weights.rs | 29 +- .../src/weights/extrinsic_weights.rs | 29 +- polkadot/runtime/rococo/src/lib.rs | 45 +- .../weights/frame_benchmarking_baseline.rs | 43 +- .../rococo/src/weights/frame_system.rs | 103 +- .../src/weights/frame_system_extensions.rs | 123 + polkadot/runtime/rococo/src/weights/mod.rs | 2 + .../rococo/src/weights/pallet_asset_rate.rs | 63 +- .../src/weights/pallet_balances_balances.rs | 58 +- ...allet_balances_nis_counterpart_balances.rs | 58 +- .../rococo/src/weights/pallet_bounties.rs | 218 +- .../src/weights/pallet_child_bounties.rs | 181 +- .../src/weights/pallet_conviction_voting.rs | 196 +- .../rococo/src/weights/pallet_identity.rs | 409 +-- .../rococo/src/weights/pallet_indices.rs | 73 +- .../src/weights/pallet_message_queue.rs | 168 +- .../rococo/src/weights/pallet_multisig.rs | 123 +- .../runtime/rococo/src/weights/pallet_nis.rs | 243 +- .../rococo/src/weights/pallet_preimage.rs | 252 +- .../rococo/src/weights/pallet_proxy.rs | 195 +- .../src/weights/pallet_ranked_collective.rs | 66 +- .../rococo/src/weights/pallet_recovery.rs | 186 ++ .../pallet_referenda_fellowship_referenda.rs | 235 +- .../src/weights/pallet_referenda_referenda.rs | 283 +-- .../rococo/src/weights/pallet_scheduler.rs | 146 +- .../runtime/rococo/src/weights/pallet_sudo.rs | 45 +- .../rococo/src/weights/pallet_timestamp.rs | 35 +- .../src/weights/pallet_transaction_payment.rs | 68 + .../rococo/src/weights/pallet_treasury.rs | 233 +- .../rococo/src/weights/pallet_utility.rs | 47 +- .../rococo/src/weights/pallet_vesting.rs | 254 +- .../rococo/src/weights/pallet_whitelist.rs | 68 +- .../runtime/rococo/src/weights/pallet_xcm.rs | 90 +- .../weights/pallet_xcm_benchmarks_fungible.rs | 191 ++ .../weights/pallet_xcm_benchmarks_generic.rs | 347 +++ .../weights/runtime_common_assigned_slots.rs | 70 +- .../src/weights/runtime_common_auctions.rs | 137 +- .../src/weights/runtime_common_claims.rs | 180 +- .../src/weights/runtime_common_coretime.rs | 86 + .../src/weights/runtime_common_crowdloan.rs | 239 +- .../runtime_common_identity_migrator.rs | 83 +- .../weights/runtime_common_paras_registrar.rs | 281 ++- .../src/weights/runtime_common_slots.rs | 127 +- .../runtime_parachains_assigner_on_demand.rs | 50 +- .../runtime_parachains_configuration.rs | 48 +- .../weights/runtime_parachains_disputes.rs | 23 +- .../src/weights/runtime_parachains_hrmp.rs | 389 ++- .../weights/runtime_parachains_inclusion.rs | 45 +- 
.../weights/runtime_parachains_initializer.rs | 27 +- .../src/weights/runtime_parachains_paras.rs | 368 +-- .../runtime_parachains_paras_inherent.rs | 429 +++- polkadot/runtime/test-runtime/Cargo.toml | 1 + polkadot/runtime/test-runtime/src/lib.rs | 40 +- polkadot/runtime/westend/Cargo.toml | 1 + polkadot/runtime/westend/src/lib.rs | 25 +- .../src/weights/frame_system_extensions.rs | 116 + polkadot/runtime/westend/src/weights/mod.rs | 2 + .../westend/src/weights/pallet_sudo.rs | 11 + .../src/weights/pallet_transaction_payment.rs | 65 + polkadot/xcm/xcm-builder/Cargo.toml | 1 + .../xcm/xcm-builder/src/tests/pay/mock.rs | 12 +- .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 4 +- .../xcm-simulator/fuzzer/src/relay_chain.rs | 4 +- prdoc/pr_2280.prdoc | 144 ++ substrate/.maintain/frame-weight-template.hbs | 2 +- substrate/bin/minimal/runtime/src/lib.rs | 4 +- substrate/bin/node-template/node/Cargo.toml | 1 + .../node-template/node/src/benchmarking.rs | 9 +- .../bin/node-template/runtime/Cargo.toml | 1 + .../bin/node-template/runtime/src/lib.rs | 12 +- substrate/bin/node/cli/Cargo.toml | 2 + .../bin/node/cli/benches/block_production.rs | 2 +- substrate/bin/node/cli/benches/executor.rs | 6 +- substrate/bin/node/cli/src/service.rs | 77 +- substrate/bin/node/cli/tests/basic.rs | 26 +- substrate/bin/node/cli/tests/fees.rs | 24 +- .../bin/node/cli/tests/submit_transaction.rs | 10 +- substrate/bin/node/runtime/Cargo.toml | 2 + substrate/bin/node/runtime/src/lib.rs | 165 +- substrate/bin/node/testing/src/bench.rs | 26 +- substrate/bin/node/testing/src/keyring.rs | 44 +- .../client/api/src/notifications/tests.rs | 7 +- substrate/client/db/benches/state_access.rs | 5 +- substrate/client/db/src/lib.rs | 262 +- substrate/client/db/src/utils.rs | 11 +- .../network-gossip/src/state_machine.rs | 5 +- substrate/client/network/sync/src/blocks.rs | 7 +- substrate/client/transaction-pool/src/lib.rs | 9 +- substrate/frame/alliance/src/weights.rs | 1033 ++++---- .../frame/asset-conversion/src/weights.rs | 126 +- substrate/frame/asset-rate/src/weights.rs | 73 +- substrate/frame/assets/src/weights.rs | 841 +++---- substrate/frame/babe/src/mock.rs | 5 +- substrate/frame/bags-list/src/weights.rs | 165 +- substrate/frame/balances/Cargo.toml | 1 + .../balances/src/tests/currency_tests.rs | 17 +- substrate/frame/balances/src/tests/mod.rs | 4 +- substrate/frame/balances/src/weights.rs | 98 +- substrate/frame/beefy/src/mock.rs | 6 +- substrate/frame/benchmarking/src/weights.rs | 81 +- substrate/frame/bounties/src/weights.rs | 405 ++- substrate/frame/broker/src/weights.rs | 357 +-- substrate/frame/child-bounties/src/weights.rs | 381 ++- substrate/frame/collective/src/tests.rs | 2 +- substrate/frame/collective/src/weights.rs | 677 ++--- substrate/frame/contracts/src/weights.rs | 2222 +++++++++-------- .../frame/conviction-voting/src/weights.rs | 405 +-- .../frame/core-fellowship/src/weights.rs | 429 ++-- substrate/frame/democracy/src/weights.rs | 1093 ++++---- .../election-provider-multi-phase/src/mock.rs | 2 +- .../src/unsigned.rs | 4 +- .../src/weights.rs | 517 ++-- .../test-staking-e2e/src/mock.rs | 4 +- substrate/frame/elections-phragmen/src/lib.rs | 2 +- .../frame/elections-phragmen/src/weights.rs | 655 ++--- substrate/frame/examples/basic/src/lib.rs | 76 +- substrate/frame/examples/basic/src/tests.rs | 9 +- .../examples/offchain-worker/src/tests.rs | 22 +- substrate/frame/examples/tasks/src/weights.rs | 78 +- substrate/frame/executive/src/tests.rs | 225 +- substrate/frame/fast-unstake/src/weights.rs | 473 ++-- 
substrate/frame/glutton/src/weights.rs | 249 +- substrate/frame/grandpa/src/mock.rs | 9 +- substrate/frame/identity/src/weights.rs | 825 +++--- substrate/frame/im-online/src/mock.rs | 4 +- substrate/frame/im-online/src/tests.rs | 4 +- substrate/frame/im-online/src/weights.rs | 85 +- substrate/frame/indices/src/weights.rs | 121 +- substrate/frame/lottery/src/weights.rs | 301 +-- substrate/frame/membership/src/weights.rs | 385 ++- substrate/frame/message-queue/src/weights.rs | 247 +- substrate/frame/migrations/src/weights.rs | 183 +- substrate/frame/multisig/src/weights.rs | 243 +- .../nft-fractionalization/src/weights.rs | 209 +- substrate/frame/nfts/src/weights.rs | 1692 ++++++------- substrate/frame/nis/src/weights.rs | 429 ++-- .../frame/nomination-pools/src/weights.rs | 258 +- .../frame/offences/benchmarking/src/mock.rs | 2 +- substrate/frame/parameters/src/weights.rs | 38 +- substrate/frame/preimage/src/weights.rs | 332 +-- substrate/frame/proxy/src/weights.rs | 377 +-- .../frame/ranked-collective/src/weights.rs | 353 +-- substrate/frame/recovery/src/weights.rs | 305 +-- substrate/frame/referenda/src/weights.rs | 1037 ++++---- substrate/frame/remark/src/weights.rs | 37 +- substrate/frame/safe-mode/src/weights.rs | 142 +- substrate/frame/salary/src/weights.rs | 217 +- substrate/frame/sassafras/src/mock.rs | 5 +- substrate/frame/scheduler/src/weights.rs | 266 +- substrate/frame/session/src/weights.rs | 117 +- substrate/frame/society/src/weights.rs | 974 ++++++-- substrate/frame/src/lib.rs | 6 +- substrate/frame/staking/src/weights.rs | 422 ++-- .../frame/state-trie-migration/src/lib.rs | 2 +- .../frame/state-trie-migration/src/weights.rs | 118 +- substrate/frame/sudo/src/benchmarking.rs | 29 +- substrate/frame/sudo/src/extension.rs | 78 +- substrate/frame/sudo/src/lib.rs | 4 +- substrate/frame/sudo/src/weights.rs | 71 +- substrate/frame/support/Cargo.toml | 2 +- .../src/construct_runtime/expand/inherent.rs | 18 +- .../src/construct_runtime/expand/metadata.rs | 6 +- .../src/construct_runtime/expand/origin.rs | 14 + substrate/frame/support/src/dispatch.rs | 29 +- substrate/frame/support/src/lib.rs | 8 +- .../frame/support/src/traits/dispatch.rs | 21 +- substrate/frame/support/src/traits/misc.rs | 13 +- .../support/src/transaction_extensions.rs | 89 + .../support/src/weights/block_weights.rs | 31 +- .../support/src/weights/extrinsic_weights.rs | 31 +- .../support/test/tests/construct_runtime.rs | 2 +- .../deprecated_where_block.stderr | 4 +- substrate/frame/support/test/tests/pallet.rs | 143 +- .../support/test/tests/pallet_instance.rs | 2 +- .../system/benchmarking/src/extensions.rs | 213 ++ .../frame/system/benchmarking/src/lib.rs | 1 + .../frame/system/benchmarking/src/mock.rs | 1 + .../system/src/extensions/check_genesis.rs | 33 +- .../system/src/extensions/check_mortality.rs | 83 +- .../src/extensions/check_non_zero_sender.rs | 95 +- .../system/src/extensions/check_nonce.rs | 184 +- .../src/extensions/check_spec_version.rs | 33 +- .../system/src/extensions/check_tx_version.rs | 32 +- .../system/src/extensions/check_weight.rs | 228 +- substrate/frame/system/src/extensions/mod.rs | 3 + .../frame/system/src/extensions/weights.rs | 196 ++ substrate/frame/system/src/lib.rs | 10 +- substrate/frame/system/src/offchain.rs | 22 +- substrate/frame/system/src/weights.rs | 249 +- substrate/frame/timestamp/src/weights.rs | 65 +- substrate/frame/tips/src/weights.rs | 241 +- .../frame/transaction-payment/Cargo.toml | 9 + .../asset-conversion-tx-payment/Cargo.toml | 12 + 
.../asset-conversion-tx-payment/README.md | 2 +- .../src/benchmarking.rs | 125 + .../asset-conversion-tx-payment/src/lib.rs | 233 +- .../asset-conversion-tx-payment/src/mock.rs | 70 +- .../asset-conversion-tx-payment/src/tests.rs | 150 +- .../src/weights.rs | 150 ++ .../asset-tx-payment/Cargo.toml | 1 + .../asset-tx-payment/README.md | 2 +- .../asset-tx-payment/src/benchmarking.rs | 123 + .../asset-tx-payment/src/lib.rs | 199 +- .../asset-tx-payment/src/mock.rs | 54 +- .../asset-tx-payment/src/tests.rs | 184 +- .../asset-tx-payment/src/weights.rs | 146 ++ .../skip-feeless-payment/src/lib.rs | 111 +- .../skip-feeless-payment/src/mock.rs | 30 +- .../skip-feeless-payment/src/tests.rs | 5 +- .../transaction-payment/src/benchmarking.rs | 81 + .../frame/transaction-payment/src/lib.rs | 146 +- .../frame/transaction-payment/src/mock.rs | 10 + .../frame/transaction-payment/src/payment.rs | 56 +- .../frame/transaction-payment/src/tests.rs | 444 ++-- .../frame/transaction-payment/src/types.rs | 2 +- .../frame/transaction-payment/src/weights.rs | 92 + .../frame/transaction-storage/src/weights.rs | 185 +- substrate/frame/treasury/src/weights.rs | 348 +-- substrate/frame/tx-pause/src/weights.rs | 38 +- substrate/frame/uniques/src/weights.rs | 368 +-- substrate/frame/utility/src/weights.rs | 161 +- substrate/frame/vesting/src/weights.rs | 234 +- substrate/frame/whitelist/src/weights.rs | 193 +- substrate/primitives/inherents/src/lib.rs | 4 +- substrate/primitives/metadata-ir/src/lib.rs | 2 +- substrate/primitives/metadata-ir/src/types.rs | 19 +- substrate/primitives/metadata-ir/src/v14.rs | 10 +- substrate/primitives/metadata-ir/src/v15.rs | 8 +- substrate/primitives/runtime/Cargo.toml | 2 + .../runtime/src/generic/checked_extrinsic.rs | 112 +- .../primitives/runtime/src/generic/mod.rs | 4 +- .../src/generic/unchecked_extrinsic.rs | 730 ++++-- substrate/primitives/runtime/src/lib.rs | 19 +- substrate/primitives/runtime/src/testing.rs | 195 +- .../runtime/src/{traits.rs => traits/mod.rs} | 244 +- .../as_transaction_extension.rs | 133 + .../dispatch_transaction.rs | 141 ++ .../src/traits/transaction_extension/mod.rs | 526 ++++ .../runtime/src/transaction_validity.rs | 6 +- substrate/scripts/run_all_benchmarks.sh | 13 + substrate/test-utils/runtime/src/extrinsic.rs | 17 +- substrate/test-utils/runtime/src/lib.rs | 93 +- .../benchmarking-cli/src/pallet/template.hbs | 4 + .../frame/remote-externalities/src/lib.rs | 5 +- substrate/utils/frame/rpc/client/src/lib.rs | 8 +- 349 files changed, 25345 insertions(+), 16846 deletions(-) create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs create mode 100644 
cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs create mode 100644 cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs create mode 100644 cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs create mode 100644 cumulus/primitives/storage-weight-reclaim/src/tests.rs delete mode 100644 docs/sdk/src/reference_docs/signed_extensions.rs create mode 100644 docs/sdk/src/reference_docs/transaction_extensions.rs create mode 100644 polkadot/runtime/rococo/src/weights/frame_system_extensions.rs create mode 100644 polkadot/runtime/rococo/src/weights/pallet_recovery.rs create mode 100644 polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs create mode 100644 polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs create mode 100644 polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs create mode 100644 polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs create mode 100644 polkadot/runtime/westend/src/weights/frame_system_extensions.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs create mode 100644 prdoc/pr_2280.prdoc create mode 100644 substrate/frame/support/src/transaction_extensions.rs create mode 100644 substrate/frame/system/benchmarking/src/extensions.rs create mode 100644 substrate/frame/system/src/extensions/weights.rs create mode 100644 substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs create mode 100644 substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs create mode 100644 substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs create mode 100644 substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs create mode 100644 substrate/frame/transaction-payment/src/benchmarking.rs create mode 100644 substrate/frame/transaction-payment/src/weights.rs rename substrate/primitives/runtime/src/{traits.rs => traits/mod.rs} (94%) create mode 100644 substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs create mode 100644 substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs create mode 100644 
substrate/primitives/runtime/src/traits/transaction_extension/mod.rs diff --git a/Cargo.lock b/Cargo.lock index ad218001b40..81881984378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4070,11 +4070,13 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", "docify 0.2.7", + "frame-benchmarking", "frame-support", "frame-system", "log", "parity-scale-codec", "scale-info", + "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", @@ -5938,7 +5940,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -6827,7 +6829,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -7834,9 +7836,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lioness" @@ -9177,6 +9179,7 @@ dependencies = [ name = "pallet-asset-conversion-tx-payment" version = "10.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-asset-conversion", @@ -11119,6 +11122,7 @@ dependencies = [ name = "pallet-transaction-payment" version = "28.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -14333,7 +14337,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.25", ] [[package]] @@ -15458,14 +15462,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -18721,7 +18725,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -19026,6 +19030,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-weights", "substrate-test-runtime-client", + "tuplex", "zstd 0.12.4", ] @@ -19074,7 +19079,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", @@ -20293,7 +20298,7 @@ dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.4.1", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -20312,7 +20317,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -21158,6 +21163,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tuplex" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa" + [[package]] name = "twox-hash" version = "1.6.3" diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index fac88b20ca5..ee54f15f38b 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -93,6 +93,7 @@ runtime-benchmarks = [ "pallet-bridge-messages/runtime-benchmarks", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index 2722f6f1c6d..035077408c4 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -105,43 +105,48 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { ($call:ty, $account_id:ty, $($filter_call:ty),*) => { #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] pub struct BridgeRejectObsoleteHeadersAndMessages; - impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { + impl sp_runtime::traits::TransactionExtensionBase for BridgeRejectObsoleteHeadersAndMessages { const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; - type AccountId = $account_id; - type Call = $call; - type AdditionalSigned = (); + type Implicit = (); + } + impl sp_runtime::traits::TransactionExtension<$call, Context> for BridgeRejectObsoleteHeadersAndMessages { type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result< - (), - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + origin: <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, _len: usize, - ) -> sp_runtime::transaction_validity::TransactionValidity { - let valid = sp_runtime::transaction_validity::ValidTransaction::default(); + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl codec::Encode, + ) -> Result< + ( + sp_runtime::transaction_validity::ValidTransaction, + Self::Val, + <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + ), sp_runtime::transaction_validity::TransactionValidityError + > { + let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); $( - let valid = valid - .combine_with(<$filter_call as $crate::BridgeRuntimeFilterCall<$call>>::validate(call)?); + let call_filter_validity = <$filter_call as $crate::BridgeRuntimeFilterCall<$call>>::validate(call)?; + let tx_validity = tx_validity.combine_with(call_filter_validity); )* - Ok(valid) + Ok((tx_validity, (), origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, + _val: Self::Val, + _origin: &<$call as 
sp_runtime::traits::Dispatchable>::RuntimeOrigin, + _call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, + _len: usize, + _context: &Context, ) -> Result { - self.validate(who, call, info, len).map(drop) + Ok(()) } } }; @@ -150,12 +155,14 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; - use frame_support::{assert_err, assert_ok}; + use codec::Encode; + use frame_support::assert_err; use sp_runtime::{ - traits::SignedExtension, + traits::DispatchTransaction, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, }; + #[derive(Encode)] pub struct MockCall { data: u32, } @@ -206,17 +213,20 @@ mod tests { ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only((), &MockCall { data: 1 }, &(), 0), InvalidTransaction::Custom(1) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only((), &MockCall { data: 2 }, &(), 0), InvalidTransaction::Custom(2) ); - assert_ok!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .validate_only((), &MockCall { data: 3 }, &(), 0) + .unwrap() + .0, ValidTransaction { priority: 3, ..Default::default() } ) } diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 8877a4fd95c..f147f1404f0 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -164,6 +164,7 @@ impl pallet_balances::Config for TestRuntime { type AccountStore = System; } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for TestRuntime { type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; @@ -176,7 +177,6 @@ impl pallet_transaction_payment::Config for TestRuntime { MinimumMultiplier, MaximumMultiplier, >; - type RuntimeEvent = RuntimeEvent; } impl pallet_bridge_grandpa::Config for TestRuntime { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index a597fb9e2f4..0c53018330e 100644 --- a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -169,12 +169,15 @@ mod integrity_tests { // nodes to the proof (x0.5 because we expect some nodes to be reused) let estimated_message_size = 512; // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + let estimated_message_dispatch_weight = >::WeightInfo::message_dispatch_weight( + estimated_message_size + ); // messages proof argument size is (for every message) messages size + some additional // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of default // "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() + let messages_proof_size = >::WeightInfo::expected_extra_storage_proof_size() .saturating_mul(2) .saturating_div(3) .saturating_add(estimated_message_size) @@ -182,7 +185,7 @@ mod integrity_tests { // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + let transaction_weight = >::WeightInfo::receive_messages_proof_weight( &PreComputedSize(transaction_size as _), messages as _, estimated_message_dispatch_weight.saturating_mul(messages), diff --git a/bridges/bin/runtime-common/src/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/refund_relayer_extension.rs index bfcb82ad166..b912f8445ac 100644 --- a/bridges/bin/runtime-common/src/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/refund_relayer_extension.rs @@ -48,9 +48,12 @@ use pallet_transaction_payment::{Config as TransactionPaymentConfig, OnChargeTra use pallet_utility::{Call as UtilityCall, Config as UtilityConfig, Pallet as UtilityPallet}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, Get, PostDispatchInfoOf, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, Get, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, Zero, + }, transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransactionBuilder, + InvalidTransaction, TransactionPriority, TransactionValidityError, ValidTransactionBuilder, }, DispatchResult, FixedPointOperand, RuntimeDebug, }; @@ -239,8 +242,8 @@ pub enum RelayerAccountAction { Slash(AccountId, RewardsAccountParams), } -/// Everything common among our refund signed extensions. -pub trait RefundSignedExtension: +/// Everything common among our refund transaction extensions. +pub trait RefundTransactionExtension: 'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo where >::BridgedChain: @@ -456,8 +459,8 @@ where } } -/// Adapter that allow implementing `sp_runtime::traits::SignedExtension` for any -/// `RefundSignedExtension`. +/// Adapter that allow implementing `sp_runtime::traits::TransactionExtension` for any +/// `RefundTransactionExtension`. 
#[derive( DefaultNoBound, CloneNoBound, @@ -468,12 +471,13 @@ where RuntimeDebugNoBound, TypeInfo, )] -pub struct RefundSignedExtensionAdapter(T) +pub struct RefundTransactionExtensionAdapter(T) where >::BridgedChain: Chain; -impl SignedExtension for RefundSignedExtensionAdapter +impl TransactionExtensionBase + for RefundTransactionExtensionAdapter where >::BridgedChain: Chain, @@ -483,22 +487,35 @@ where + MessagesCallSubType::Instance>, { const IDENTIFIER: &'static str = T::Id::STR; - type AccountId = AccountIdOf; - type Call = CallOf; - type AdditionalSigned = (); - type Pre = Option>>; + type Implicit = (); +} - fn additional_signed(&self) -> Result<(), TransactionValidityError> { - Ok(()) - } +impl TransactionExtension, Context> + for RefundTransactionExtensionAdapter +where + >::BridgedChain: + Chain, + CallOf: Dispatchable + + IsSubType, T::Runtime>> + + GrandpaCallSubType + + MessagesCallSubType::Instance>, + as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner> + Clone, +{ + type Pre = Option>>; + type Val = Option; fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: as Dispatchable>::RuntimeOrigin, + call: &CallOf, + _info: &DispatchInfoOf>, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult> { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; // this is the only relevant line of code for the `pre_dispatch` // // we're not calling `validate` from `pre_dispatch` directly because of performance @@ -511,12 +528,12 @@ where // we only boost priority of presumably correct message delivery transactions let bundled_messages = match T::bundled_messages_for_priority_boost(parsed_call.as_ref()) { Some(bundled_messages) => bundled_messages, - None => return Ok(Default::default()), + None => return Ok((Default::default(), parsed_call, origin)), }; // we only boost priority if relayer has staked required balance if !RelayersPallet::::is_registration_active(who) { - return Ok(Default::default()) + return Ok((Default::default(), parsed_call, origin)) } // compute priority boost @@ -535,20 +552,21 @@ where priority_boost, ); - valid_transaction.build() + let validity = valid_transaction.build()?; + Ok((validity, parsed_call, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + val: Self::Val, + origin: & as Dispatchable>::RuntimeOrigin, + _call: &CallOf, + _info: &DispatchInfoOf>, _len: usize, + _context: &Context, ) -> Result { - // this is a relevant piece of `validate` that we need here (in `pre_dispatch`) - let parsed_call = T::parse_and_check_for_obsolete_call(call)?; - - Ok(parsed_call.map(|call_info| { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + Ok(val.map(|call_info| { log::trace!( target: "runtime::bridge", "{} via {:?} parsed bridge transaction in pre-dispatch: {:?}", @@ -561,13 +579,14 @@ where } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf>, + post_info: &PostDispatchInfoOf>, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - let call_result = T::analyze_call_result(pre, info, post_info, len, result); + let call_result = T::analyze_call_result(Some(pre), info, post_info, len, result); match call_result { RelayerAccountAction::None => 
(), @@ -595,7 +614,7 @@ where } } -/// Signed extension that refunds a relayer for new messages coming from a parachain. +/// Transaction extension that refunds a relayer for new messages coming from a parachain. /// /// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) /// with message delivery transaction. Batch may deliver either both relay chain header and @@ -636,7 +655,7 @@ pub struct RefundBridgedParachainMessages, ); -impl RefundSignedExtension +impl RefundTransactionExtension for RefundBridgedParachainMessages where Self: 'static + Send + Sync, @@ -730,13 +749,13 @@ where } } -/// Signed extension that refunds a relayer for new messages coming from a standalone (GRANDPA) +/// Transaction extension that refunds a relayer for new messages coming from a standalone (GRANDPA) /// chain. /// /// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) /// with message delivery transaction. Batch may deliver either both relay chain header and -/// parachain head, or just parachain head. Corresponding headers must be used in messages -/// proof verification. +/// parachain head, or just parachain head. Corresponding headers must be used in messages proof +/// verification. /// /// Extension does not refund transaction tip due to security reasons. #[derive( @@ -771,7 +790,7 @@ pub struct RefundBridgedGrandpaMessages, ); -impl RefundSignedExtension +impl RefundTransactionExtension for RefundBridgedGrandpaMessages where Self: 'static + Send + Sync, @@ -869,8 +888,8 @@ mod tests { Call as ParachainsCall, Pallet as ParachainsPallet, RelayBlockHash, }; use sp_runtime::{ - traits::{ConstU64, Header as HeaderT}, - transaction_validity::{InvalidTransaction, ValidTransaction}, + traits::{ConstU64, DispatchTransaction, Header as HeaderT}, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -899,7 +918,7 @@ mod tests { ConstU64<1>, StrTestExtension, >; - type TestGrandpaExtension = RefundSignedExtensionAdapter; + type TestGrandpaExtension = RefundTransactionExtensionAdapter; type TestExtensionProvider = RefundBridgedParachainMessages< TestRuntime, DefaultRefundableParachainId<(), TestParachain>, @@ -908,7 +927,7 @@ mod tests { ConstU64<1>, StrTestExtension, >; - type TestExtension = RefundSignedExtensionAdapter; + type TestExtension = RefundTransactionExtensionAdapter; fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { let test_stake: ThisChainBalance = TestStake::get(); @@ -1407,14 +1426,28 @@ mod tests { fn run_validate(call: RuntimeCall) -> TransactionValidity { let extension: TestExtension = - RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|res| res.0) } fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity { let extension: TestGrandpaExtension = - RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + 
&DispatchInfo::default(), + 0, + ) + .map(|res| res.0) } fn run_validate_ignore_priority(call: RuntimeCall) -> TransactionValidity { @@ -1428,16 +1461,30 @@ mod tests { call: RuntimeCall, ) -> Result>, TransactionValidityError> { let extension: TestExtension = - RefundSignedExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn run_grandpa_pre_dispatch( call: RuntimeCall, ) -> Result>, TransactionValidityError> { let extension: TestGrandpaExtension = - RefundSignedExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn dispatch_info() -> DispatchInfo { @@ -1460,11 +1507,12 @@ mod tests { dispatch_result: DispatchResult, ) { let post_dispatch_result = TestExtension::post_dispatch( - Some(pre_dispatch_data), + pre_dispatch_data, &dispatch_info(), &post_dispatch_info(), 1024, &dispatch_result, + &(), ); assert_eq!(post_dispatch_result, Ok(())); } diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs index c49aa4b8563..f186f6427ae 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs @@ -26,7 +26,7 @@ pub use bp_polkadot_core::{ }; use bp_messages::*; -use bp_polkadot_core::SuffixedCommonSignedExtension; +use bp_polkadot_core::SuffixedCommonTransactionExtension; use bp_runtime::extensions::{ BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, }; @@ -164,7 +164,7 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// Signed extension that is used by all bridge hubs. -pub type SignedExtension = SuffixedCommonSignedExtension<( +pub type TransactionExtension = SuffixedCommonTransactionExtension<( BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, )>; diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index c4e697fbe95..992ef1bd7a1 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -107,5 +107,5 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. 
/// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_829_647; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_904_835; } diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs index e3b4d0520f6..253a1010e83 100644 --- a/bridges/primitives/chain-kusama/src/lib.rs +++ b/bridges/primitives/chain-kusama/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Kusama { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Kusama. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Kusama. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Kusama runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs index f2eebf93124..73dd122bd15 100644 --- a/bridges/primitives/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/primitives/chain-polkadot-bulletin/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, extensions::{ CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, - CheckWeight, GenericSignedExtension, GenericSignedExtensionSchema, + CheckWeight, GenericTransactionExtension, GenericTransactionExtensionSchema, }, Chain, ChainId, TransactionEra, }; @@ -37,7 +37,12 @@ use frame_support::{ }; use frame_system::limits; use scale_info::TypeInfo; -use sp_runtime::{traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill}; +use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtensionBase}, + transaction_validity::TransactionValidityError, + Perbill, +}; // This chain reuses most of Polkadot primitives. pub use bp_polkadot_core::{ @@ -71,10 +76,10 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// This signed extension is used to ensure that the chain transactions are signed by proper -pub type ValidateSigned = GenericSignedExtensionSchema<(), ()>; +pub type ValidateSigned = GenericTransactionExtensionSchema<(), ()>; /// Signed extension schema, used by Polkadot Bulletin. -pub type SignedExtensionSchema = GenericSignedExtension<( +pub type TransactionExtensionSchema = GenericTransactionExtension<( ( CheckNonZeroSender, CheckSpecVersion, @@ -87,34 +92,30 @@ pub type SignedExtensionSchema = GenericSignedExtension<( ValidateSigned, )>; -/// Signed extension, used by Polkadot Bulletin. +/// Transaction extension, used by Polkadot Bulletin. 
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct SignedExtension(SignedExtensionSchema); +pub struct TransactionExtension(TransactionExtensionSchema); -impl sp_runtime::traits::SignedExtension for SignedExtension { +impl TransactionExtensionBase for TransactionExtension { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = - ::AdditionalSigned; - type Pre = (); + type Implicit = ::Implicit; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn implicit(&self) -> Result { + ::implicit(&self.0) } +} - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } +impl sp_runtime::traits::TransactionExtension for TransactionExtension +where + C: Dispatchable, +{ + type Pre = (); + type Val = (); + + impl_tx_ext_default!(C; Context; validate prepare); } -impl SignedExtension { +impl TransactionExtension { /// Create signed extension from its components. pub fn from_params( spec_version: u32, @@ -123,7 +124,7 @@ impl SignedExtension { genesis_hash: Hash, nonce: Nonce, ) -> Self { - Self(GenericSignedExtension::new( + Self(GenericTransactionExtension::new( ( ( (), // non-zero sender diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs index fc5e10308a8..e5e2e7c3a04 100644 --- a/bridges/primitives/chain-polkadot/src/lib.rs +++ b/bridges/primitives/chain-polkadot/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Polkadot { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -/// The SignedExtension used by Polkadot. -pub type SignedExtension = SuffixedCommonSignedExtension; +/// The TransactionExtension used by Polkadot. +pub type TransactionExtension = SuffixedCommonTransactionExtension; /// Name of the parachains pallet in the Polkadot runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs index f1b256f0f09..267c6b2b1f0 100644 --- a/bridges/primitives/chain-rococo/src/lib.rs +++ b/bridges/primitives/chain-rococo/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Rococo { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Rococo. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Rococo. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs index f03fd2160a7..afa02e8ee54 100644 --- a/bridges/primitives/chain-westend/src/lib.rs +++ b/bridges/primitives/chain-westend/src/lib.rs @@ -59,8 +59,8 @@ impl ChainWithGrandpa for Westend { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Westend. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Westend. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. 
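// The Polkadot Bulletin extension above illustrates the general migration pattern: the single
// `SignedExtension` impl is split into `TransactionExtensionBase` (which carries the renamed
// `AdditionalSigned` -> `Implicit` data) and `TransactionExtension` (whose `validate`/`prepare`
// hooks replace `pre_dispatch` and are defaulted here via `impl_tx_ext_default!`). A rough,
// hypothetical sketch of the same split for a standalone no-op extension, assuming the trait is
// generic over the call type and a context parameter as the impls above suggest (not part of
// this patch):

use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{Dispatchable, TransactionExtension, TransactionExtensionBase},
	transaction_validity::TransactionValidityError,
};

#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, TypeInfo)]
pub struct NoopExtension;

impl TransactionExtensionBase for NoopExtension {
	const IDENTIFIER: &'static str = "NoopExtension";
	type Implicit = ();

	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
		Ok(())
	}
}

impl<C, Context> TransactionExtension<C, Context> for NoopExtension
where
	C: Dispatchable,
{
	type Pre = ();
	type Val = ();

	impl_tx_ext_default!(C; Context; validate prepare);
}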
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index df2836495bb..d59b99db4b5 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -24,8 +24,8 @@ use bp_runtime::{ self, extensions::{ ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, - CheckSpecVersion, CheckTxVersion, CheckWeight, GenericSignedExtension, - SignedExtensionSchema, + CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension, + TransactionExtensionSchema, }, EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra, }; @@ -229,8 +229,12 @@ pub type SignedBlock = generic::SignedBlock; pub type Balance = u128; /// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic, Signature, SignedExt>; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< + AccountAddress, + EncodedOrDecodedCall, + Signature, + TransactionExt, +>; /// Account address, used by the Polkadot-like chain. pub type Address = MultiAddress; @@ -275,7 +279,7 @@ impl AccountInfoStorageMapKeyProvider { } /// Extra signed extension data that is used by most chains. -pub type CommonSignedExtra = ( +pub type CommonTransactionExtra = ( CheckNonZeroSender, CheckSpecVersion, CheckTxVersion, @@ -286,12 +290,12 @@ pub type CommonSignedExtra = ( ChargeTransactionPayment, ); -/// Extra signed extension data that starts with `CommonSignedExtra`. -pub type SuffixedCommonSignedExtension = - GenericSignedExtension<(CommonSignedExtra, Suffix)>; +/// Extra transaction extension data that starts with `CommonTransactionExtra`. +pub type SuffixedCommonTransactionExtension = + GenericTransactionExtension<(CommonTransactionExtra, Suffix)>; -/// Helper trait to define some extra methods on `SuffixedCommonSignedExtension`. -pub trait SuffixedCommonSignedExtensionExt { +/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`. +pub trait SuffixedCommonTransactionExtensionExt { /// Create signed extension from its components. fn from_params( spec_version: u32, @@ -300,7 +304,7 @@ pub trait SuffixedCommonSignedExtensionExt { genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self; /// Return transaction nonce. @@ -310,9 +314,10 @@ pub trait SuffixedCommonSignedExtensionExt { fn tip(&self) -> Balance; } -impl SuffixedCommonSignedExtensionExt for SuffixedCommonSignedExtension +impl SuffixedCommonTransactionExtensionExt + for SuffixedCommonTransactionExtension where - Suffix: SignedExtensionSchema, + Suffix: TransactionExtensionSchema, { fn from_params( spec_version: u32, @@ -321,9 +326,9 @@ where genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self { - GenericSignedExtension::new( + GenericTransactionExtension::new( ( ( (), // non-zero sender @@ -365,7 +370,7 @@ where } /// Signed extension that is used by most chains. 
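// With the rename, client code still builds the common extension through
// `SuffixedCommonTransactionExtensionExt::from_params`; only the `extra` pair changes to
// `(Suffix::Payload, Suffix::Implicit)`. A hypothetical client-side helper, assuming the full
// parameter list is (spec version, transaction version, era, genesis hash, nonce, tip, suffix
// pair) in that order - the hunks above show only part of the signature:

use bp_polkadot_core::{
	Balance, CommonTransactionExtension, Hash, Nonce, SuffixedCommonTransactionExtensionExt,
};
use bp_runtime::TransactionEra;

fn build_common_extension(
	spec_version: u32,
	transaction_version: u32,
	genesis_hash: Hash,
	nonce: Nonce,
	tip: Balance,
) -> CommonTransactionExtension {
	CommonTransactionExtension::from_params(
		spec_version,
		transaction_version,
		TransactionEra::Immortal, // a mortal era would reference a known header instead
		genesis_hash,
		nonce,
		tip,
		((), ()), // empty suffix: no extra payload, no extra implicit data
	)
}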
-pub type CommonSignedExtension = SuffixedCommonSignedExtension<()>; +pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>; #[cfg(test)] mod tests { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index d896bc92eff..a31e7b5bb47 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -20,135 +20,138 @@ use codec::{Compact, Decode, Encode}; use impl_trait_for_tuples::impl_for_tuples; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension, TransactionExtensionBase}, transaction_validity::TransactionValidityError, }; use sp_std::{fmt::Debug, marker::PhantomData}; -/// Trait that describes some properties of a `SignedExtension` that are needed in order to send a -/// transaction to the chain. -pub trait SignedExtensionSchema: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo { +/// Trait that describes some properties of a `TransactionExtension` that are needed in order to +/// send a transaction to the chain. +pub trait TransactionExtensionSchema: + Encode + Decode + Debug + Eq + Clone + StaticTypeInfo +{ /// A type of the data encoded as part of the transaction. type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; /// Parameters which are part of the payload used to produce transaction signature, /// but don't end up in the transaction itself (i.e. inherent part of the runtime). - type AdditionalSigned: Encode + Debug + Eq + Clone + StaticTypeInfo; + type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; } -impl SignedExtensionSchema for () { +impl TransactionExtensionSchema for () { type Payload = (); - type AdditionalSigned = (); + type Implicit = (); } -/// An implementation of `SignedExtensionSchema` using generic params. +/// An implementation of `TransactionExtensionSchema` using generic params. #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct GenericSignedExtensionSchema(PhantomData<(P, S)>); +pub struct GenericTransactionExtensionSchema(PhantomData<(P, S)>); -impl SignedExtensionSchema for GenericSignedExtensionSchema +impl TransactionExtensionSchema for GenericTransactionExtensionSchema where P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, - S: Encode + Debug + Eq + Clone + StaticTypeInfo, + S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, { type Payload = P; - type AdditionalSigned = S; + type Implicit = S; } -/// The `SignedExtensionSchema` for `frame_system::CheckNonZeroSender`. -pub type CheckNonZeroSender = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`. +pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckSpecVersion`. -pub type CheckSpecVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`. +pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckTxVersion`. -pub type CheckTxVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`. +pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckGenesis`. 
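// Each schema pairs a `Payload` (data explicitly encoded into the transaction) with an
// `Implicit` (the former `AdditionalSigned`: data that is only mixed into the signed payload,
// such as the `u32` spec version for `CheckSpecVersion` or the genesis `Hash` for
// `CheckGenesis`). A hypothetical alias following the same pattern, for an extension that
// encodes nothing but implicitly commits to some hash:

pub type CheckForkId<Hash> = GenericTransactionExtensionSchema<(), Hash>;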
-pub type CheckGenesis = GenericSignedExtensionSchema<(), Hash>; +/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`. +pub type CheckGenesis = GenericTransactionExtensionSchema<(), Hash>; -/// The `SignedExtensionSchema` for `frame_system::CheckEra`. -pub type CheckEra = GenericSignedExtensionSchema; +/// The `TransactionExtensionSchema` for `frame_system::CheckEra`. +pub type CheckEra = GenericTransactionExtensionSchema; -/// The `SignedExtensionSchema` for `frame_system::CheckNonce`. -pub type CheckNonce = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`. +pub type CheckNonce = GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckWeight`. -pub type CheckWeight = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`. +pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. -pub type ChargeTransactionPayment = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. +pub type ChargeTransactionPayment = + GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. -pub type PrevalidateAttests = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. +pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. -pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. +pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `RefundBridgedParachainMessages`. +/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`. /// This schema is dedicated for `RefundBridgedParachainMessages` signed extension as /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` /// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` -pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; +pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>; #[impl_for_tuples(1, 12)] -impl SignedExtensionSchema for Tuple { +impl TransactionExtensionSchema for Tuple { for_tuples!( type Payload = ( #( Tuple::Payload ),* ); ); - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); } /// A simplified version of signed extensions meant for producing signed transactions /// and signed payloads in the client code. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct GenericSignedExtension { +pub struct GenericTransactionExtension { /// A payload that is included in the transaction. pub payload: S::Payload, #[codec(skip)] // It may be set to `None` if extensions are decoded. 
We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to - // read fields of the `payload`. And when resigning transaction, we're reconstructing - // `SignedExtensions` from scratch. - additional_signed: Option, + // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only + // used to read fields of the `payload`. And when resigning transaction, we're reconstructing + // `TransactionExtensions` from scratch. + implicit: Option, } -impl GenericSignedExtension { - /// Create new `GenericSignedExtension` object. - pub fn new(payload: S::Payload, additional_signed: Option) -> Self { - Self { payload, additional_signed } +impl GenericTransactionExtension { + /// Create new `GenericTransactionExtension` object. + pub fn new(payload: S::Payload, implicit: Option) -> Self { + Self { payload, implicit } } } -impl SignedExtension for GenericSignedExtension +impl TransactionExtensionBase for GenericTransactionExtension where - S: SignedExtensionSchema, + S: TransactionExtensionSchema, S::Payload: Send + Sync, - S::AdditionalSigned: Send + Sync, + S::Implicit: Send + Sync, { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = S::AdditionalSigned; - type Pre = (); + type Implicit = S::Implicit; - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { // we shall not ever see this error in relay, because we are never signing decoded // transactions. Instead we're constructing and signing new transactions. So the error code // is kinda random here - self.additional_signed.clone().ok_or( - frame_support::unsigned::TransactionValidityError::Unknown( + self.implicit + .clone() + .ok_or(frame_support::unsigned::TransactionValidityError::Unknown( frame_support::unsigned::UnknownTransaction::Custom(0xFF), - ), - ) + )) } +} +impl TransactionExtension for GenericTransactionExtension +where + C: Dispatchable, + S: TransactionExtensionSchema, + S::Payload: Send + Sync, + S::Implicit: Send + Sync, +{ + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; Context; validate prepare); } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 4e8b311cb97..5f169e82f49 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -181,6 +181,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 1873bd0a23e..60d8d6f5136 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -138,6 +138,7 @@ runtime-benchmarks = [ "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-primitives-utility/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", 
"frame-support/runtime-benchmarks", @@ -150,6 +151,7 @@ runtime-benchmarks = [ "pallet-parachain-template/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index 004a5d70ebc..f7754e594e6 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -97,8 +97,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -112,7 +112,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -353,6 +353,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -529,6 +530,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_session, SessionBench::] [pallet_timestamp, Timestamp] @@ -712,6 +714,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -727,6 +730,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 05936e93993..3eb63a24b74 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -119,6 +119,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -130,6 +131,7 @@ runtime-benchmarks = [ "pallet-proxy/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 17a12dd2f6f..32966ab6341 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -234,6 +235,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -761,6 +763,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { type Fungibles = LocalAndForeignAssets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -948,8 +953,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -961,7 +966,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
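// Besides swapping `SignedExtra` for `TxExtension` in the `UncheckedExtrinsic` alias, each
// runtime in this patch wires up the new weight hooks: `ExtensionsWeightInfo` on
// `frame_system::Config`, `WeightInfo` on `pallet_transaction_payment::Config`, and `WeightInfo`
// plus a benchmark-only `BenchmarkHelper` on `pallet_asset_conversion_tx_payment::Config`.
// A minimal sketch of the renamed tuple for a hypothetical runtime (`Runtime`, `Address`,
// `RuntimeCall` and `Signature` are placeholders; the concrete runtimes above append their own
// charging extension instead of plain `ChargeTransactionPayment`):

pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckMortality<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);

pub type UncheckedExtrinsic =
	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;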
pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, @@ -1034,14 +1039,77 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + xcm::v3::MultiLocation, + xcm::v3::MultiLocation, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = xcm::v3::MultiLocation::new( + 1, + xcm::v3::Junctions::X3( + xcm::v3::Junction::Parachain(3000), + xcm::v3::Junction::PalletInstance(53), + xcm::v3::Junction::GeneralIndex(seed.into()), + ), + ); + (asset_id, asset_id) + } + + fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into())); + + let token_native = Box::new(TokenLocationV3::get()); + let token_second = Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1052,6 +1120,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1302,6 +1371,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1336,6 +1406,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..ed2c5f3056e --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. 
+ Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index fa9e86102c6..134b5341a40 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -19,7 +19,9 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -32,6 +34,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 00000000000..0a639b368af --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 209_000_000 picoseconds. + Weight::from_parts(212_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_228_000_000 picoseconds. 
+ Weight::from_parts(1_268_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..035f9a6dbe5 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. 
+ Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 78c48507a7a..c10e02bd8a6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -110,6 +110,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "hex-literal", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -120,6 +121,7 @@ runtime-benchmarks = [ "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 1ead2897855..5246828da31 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -162,6 +162,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -218,6 +219,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -735,6 +737,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { type Fungibles = LocalAndForeignAssets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -920,8 +925,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -933,7 +938,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
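// The benchmark wiring follows the same shape in every runtime touched by this patch: the new
// `frame_system_extensions` and `pallet_transaction_payment` benchmarks (plus
// `pallet_asset_conversion_tx_payment` on the asset hubs) are listed in `define_benchmarks!`,
// and the runtime-api implementations import `frame_system_benchmarking::extensions::Pallet`
// as `SystemExtensionsBench`, as the hunks above show. A sketch for a hypothetical runtime
// (`Runtime`, `TransactionPayment` and `AssetTxPayment` stand in for that runtime's own names):

#[cfg(feature = "runtime-benchmarks")]
mod benches {
	frame_benchmarking::define_benchmarks!(
		[frame_system, SystemBench::<Runtime>]
		[frame_system_extensions, SystemExtensionsBench::<Runtime>]
		[pallet_transaction_payment, TransactionPayment]
		[pallet_asset_conversion_tx_payment, AssetTxPayment]
	);
}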
pub type Migrations = ( @@ -1065,14 +1070,77 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + xcm::v3::MultiLocation, + xcm::v3::MultiLocation, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = xcm::v3::MultiLocation::new( + 1, + xcm::v3::Junctions::X3( + xcm::v3::Junction::Parachain(3000), + xcm::v3::Junction::PalletInstance(53), + xcm::v3::Junction::GeneralIndex(seed.into()), + ), + ); + (asset_id, asset_id) + } + + fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into())); + + let token_native = Box::new(xcm::v3::MultiLocation::new(1, xcm::v3::Junctions::Here)); + let token_second = Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 2).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1083,6 +1151,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1379,6 +1448,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1413,6 +1483,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..46f4f2bfd96 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_206_000 picoseconds. + Weight::from_parts(6_212_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. 
+ Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 631_000 picoseconds. + Weight::from_parts(3_086_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_446_000 picoseconds. + Weight::from_parts(5_911_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 481_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_927_000 picoseconds. + Weight::from_parts(6_613_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 2f1fcfb05f3..691ed2e5730 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -18,7 +18,9 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -31,6 +33,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 00000000000..8fe302630fb --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 214_000_000 picoseconds. + Weight::from_parts(219_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_211_000_000 picoseconds. 
+ Weight::from_parts(1_243_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..b4c78a78b48 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 40_847_000 picoseconds. 
+ Weight::from_parts(49_674_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 7a1951fd24b..242627815d3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -242,6 +242,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 6dbf96edc2a..323e6ed65e6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -40,7 +40,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedGrandpaMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, }, }; @@ -168,7 +168,7 @@ impl messages::BridgedChainWithMessages for RococoBulletin {} /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin /// chain. -pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundTransactionExtensionAdapter< RefundBridgedGrandpaMessages< Runtime, BridgeGrandpaRococoBulletinInstance, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 5d55d7afbac..05d1aa188d2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -39,7 +39,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedParachainMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, RefundableParachain, }, }; @@ -173,7 +173,7 @@ impl UnderlyingChainProvider for BridgeHubWestend { impl messages::BridgedChainWithMessages for BridgeHubWestend {} /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. 
-pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundTransactionExtensionAdapter< RefundBridgedParachainMessages< Runtime, RefundableParachain< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 0b48d1717fa..16e3b2e8600 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -115,8 +115,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -134,7 +134,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -262,6 +262,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -324,6 +326,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -759,12 +762,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1065,6 +1070,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1095,6 +1101,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1478,16 +1485,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtensionBase, Zero}, }; #[test] fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1501,11 +1508,11 @@ mod tests { bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ) - ); + ).into(); // for BridgeHubRococo { - let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( + let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1516,8 +1523,8 @@ mod tests { ); assert_eq!(payload.encode(), bhr_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode(), - bhr_indirect_payload.additional_signed().unwrap().encode() + payload.implicit().unwrap().encode(), + bhr_indirect_payload.implicit().unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..99e3d6aeba0 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(5_842_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 732_000 picoseconds. + Weight::from_parts(2_875_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_627_000 picoseconds. + Weight::from_parts(6_322_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_455_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_798_000 picoseconds. 
+ Weight::from_parts(6_272_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index aac39a4564f..d97a30db954 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages_rococo_to_rococo_bulletin; @@ -36,6 +37,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..71d17e7259f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 34_956_000 picoseconds. + Weight::from_parts(40_788_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 239bd946e75..46c5df18a15 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -22,7 +22,7 @@ use bridge_hub_rococo_runtime::{ bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - SignedExtra, UncheckedExtrinsic, + TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -171,7 +171,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -188,13 +188,13 @@ fn construct_extrinsic( OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index f11954cf165..565343c55a5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -22,7 +22,7 @@ use bridge_hub_rococo_runtime::{ xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, EthereumGatewayAddress, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, - RuntimeEvent, RuntimeOrigin, SessionKeys, SignedExtra, TransactionPayment, UncheckedExtrinsic, + RuntimeEvent, RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -48,7 +48,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( 
frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -64,14 +64,15 @@ fn construct_extrinsic( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 8623f7cb366..b5fe093b8be 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -204,6 +204,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index bce722aa5f8..934dce5c2c4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -36,7 +36,7 @@ use bridge_runtime_common::{ XcmBlobMessageDispatch, XcmVersionOfDestAndRemoteBridge, }, refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedParachainMessages, RefundTransactionExtensionAdapter, RefundableMessagesLane, RefundableParachain, }, }; @@ -190,7 +190,7 @@ impl ThisChainWithMessages for BridgeHubWestend { } /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. -pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< +pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundTransactionExtensionAdapter< RefundBridgedParachainMessages< Runtime, RefundableParachain, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index e1344fce63d..86ed8b2f488 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -97,8 +97,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -113,7 +113,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -298,6 +298,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -515,12 +516,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -761,6 +764,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -790,6 +794,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1135,16 +1140,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtensionBase, Zero}, }; #[test] fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1157,10 +1162,10 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), - ); + ).into(); { - let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1171,8 +1176,8 @@ mod tests { ); assert_eq!(payload.encode(), bh_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode(), - bh_indirect_payload.additional_signed().unwrap().encode() + payload.implicit().unwrap().encode(), + bh_indirect_payload.implicit().unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..06f1114b4de --- /dev/null +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(6_021_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(2_805_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_727_000 picoseconds. 
+ Weight::from_parts(6_051_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. + Weight::from_parts(2_655_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_808_000 picoseconds. + Weight::from_parts(6_402_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index a65ee31d3e5..e23033e0dfd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -25,6 +25,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages; @@ -35,6 +36,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..92c53b91879 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 40_286_000 picoseconds. + Weight::from_parts(45_816_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 149a3bbeb75..7ea22befe95 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -24,7 +24,7 @@ use bridge_hub_westend_runtime::{ xcm_config::{RelayNetwork, WestendLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoChainId, BridgeHubRococoLocation, @@ -65,7 +65,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -78,14 +78,15 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); UncheckedExtrinsic::new_signed( call, account_id.into(), Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml 
b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index ed264f28c26..32d7e97bc45 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -117,6 +117,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 6bcf98c428f..3887a1d74ec 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -175,6 +175,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = ConstU16<0>; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -231,6 +232,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -707,8 +709,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -719,7 +721,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations executed on runtime upgrade as a nested tuple of types implementing /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( @@ -745,6 +747,7 @@ pub type Executive = frame_executive::Executive< mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -752,6 +755,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -955,6 +959,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -972,6 +977,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..f3030cc4f6a --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_497_000 picoseconds. + Weight::from_parts(5_961_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_240_000 picoseconds. 
+ Weight::from_parts(8_175_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 671_000 picoseconds. + Weight::from_parts(3_005_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_426_000 picoseconds. + Weight::from_parts(6_131_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_715_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(2_635_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_958_000 picoseconds. + Weight::from_parts(6_753_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index a9a298e547e..00b3bd92d5e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -18,6 +18,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_alliance; pub mod pallet_asset_rate; pub mod pallet_balances; @@ -39,6 +40,7 @@ pub mod pallet_salary_fellowship_salary; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..5d077b89d56 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 39_815_000 picoseconds. + Weight::from_parts(46_067_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index dcc6c4e853a..747f75cf2e2 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -158,6 +158,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 0668b9a4c7d..8ce46ccd34f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -80,8 +80,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. 
-pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -93,7 +93,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -240,6 +240,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } parameter_types! { @@ -423,6 +424,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -686,6 +688,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -703,6 +706,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 0bc3b510ed5..70a6a81bdcf 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -156,6 +156,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index cdff371c505..9913e47a93a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -89,8 +89,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -103,7 +103,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( @@ -192,6 +192,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -251,6 +253,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..aac73680ad1 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. 
+ Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index f1050b3ae63..7b6ab611e6f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -29,6 +30,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..29d48abab89 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. 
+ Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index a7d52dfd784..549ef7ce4d7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -153,6 +153,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 7fa70647992..1a330821d3f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -89,8 +89,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -103,7 +103,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -192,6 +192,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -251,6 +253,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..0683158a01a --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. 
+ Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index f1050b3ae63..7b6ab611e6f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -29,6 +30,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..f159f877afe --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 232a82115a8..199057c3764 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -289,8 +289,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( pallet_sudo::CheckOnlySudoAccount, frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, @@ -301,7 +301,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -316,6 +316,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [cumulus_pallet_parachain_system, ParachainSystem] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_glutton, Glutton] [pallet_message_queue, MessageQueue] [pallet_timestamp, Timestamp] @@ -439,6 +440,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -455,6 +457,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..e6efa762308 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ +// --chain=glutton-westend-dev-1300 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_908_000 picoseconds. + Weight::from_parts(4_007_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. 
+ Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(851_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_387_000 picoseconds. + Weight::from_parts(3_646_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(651_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 451_000 picoseconds. + Weight::from_parts(662_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_537_000 picoseconds. + Weight::from_parts(4_208_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index c0b8fb7636b..c9781639c3e 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -152,6 +152,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 2b8cc32f67c..c5e420785de 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -83,8 +83,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -97,7 +97,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
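The generated weight functions in these `frame_system_extensions.rs` files all share the same arithmetic: a benchmarked base weight expressed as `(ref_time, proof_size)` parts, plus database access priced through `T::DbWeight`. A minimal, hedged illustration (standalone, not part of the patch) that mirrors the shape of one `check_weight` result above, assuming the runtime prices storage access with `RocksDbWeight` as these runtimes do:

use frame_support::{
	traits::Get,
	weights::{constants::RocksDbWeight, Weight},
};

// Illustrative only: recomputes the structure of a generated `check_weight` weight.
fn check_weight_style_total() -> Weight {
	Weight::from_parts(6_192_000, 0) // benchmarked ref_time, in picoseconds
		.saturating_add(Weight::from_parts(0, 1533)) // proof-size bound, in bytes
		.saturating_add(RocksDbWeight::get().reads(2)) // two storage reads
		.saturating_add(RocksDbWeight::get().writes(2)) // two storage writes
}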
pub type Migrations = ( @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -232,6 +233,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -453,6 +455,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..1ba2010991f --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. 
+ Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs index 3396a8caea0..c4cea99ee5a 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -27,6 +28,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..555fd5a32fa --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. 
+ Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index e87e825a34e..8d8421332c1 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -152,6 +152,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 2dc2d06a66b..dea079a4a98 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -83,8 +83,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The transactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -97,7 +97,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -178,6 +178,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -232,6 +233,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..7130a62ab6a --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,121 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
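Each of the new `frame_system_extensions.rs` files, including the one that follows, implements `frame_system::ExtensionsWeightInfo` for the runtime. For orientation, a hedged outline of the method surface these impls provide; the names are taken from the impls in this patch, while the authoritative trait definition lives in `frame_system`:

use frame_support::weights::Weight;

// Orientation only: mirrors the functions implemented by the generated weight files here.
pub trait ExtensionsWeightInfoOutline {
	fn check_genesis() -> Weight;
	fn check_mortality() -> Weight;
	fn check_non_zero_sender() -> Weight;
	fn check_nonce() -> Weight;
	fn check_spec_version() -> Weight;
	fn check_tx_version() -> Weight;
	fn check_weight() -> Weight;
}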
+ +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. 
+ Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs index 3396a8caea0..c4cea99ee5a 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -27,6 +28,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..30e4524e586 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 4cc0a81ef49..023c9978302 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -258,8 +258,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckSpecVersion, frame_system::CheckTxVersion, frame_system::CheckGenesis, @@ -269,7 +269,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 829754731a4..fedac36e48d 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -34,15 +34,19 @@ pub mod xcm_config; use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::AggregateMessageOrigin; -use frame_support::unsigned::TransactionValidityError; use scale_info::TypeInfo; use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf}, - transaction_validity::{TransactionSource, TransactionValidity}, + traits::{ + AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, OriginOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, + }, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + }, ApplyExtrinsicResult, }; use sp_std::prelude::*; @@ -275,35 +279,37 @@ construct_runtime! { /// Simple implementation which fails any transaction which is signed. #[derive(Eq, PartialEq, Clone, Default, sp_core::RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DisallowSigned; -impl sp_runtime::traits::SignedExtension for DisallowSigned { + +impl TransactionExtensionBase for DisallowSigned { const IDENTIFIER: &'static str = "DisallowSigned"; - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); +} + +impl TransactionExtension for DisallowSigned { + type Val = (); type Pre = (); - fn additional_signed( + fn validate( &self, - ) -> sp_std::result::Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - Ok(()) + _origin: OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut C, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + Err(InvalidTransaction::BadProof.into()) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - let i = sp_runtime::transaction_validity::InvalidTransaction::BadProof; - Err(sp_runtime::transaction_validity::TransactionValidityError::Invalid(i)) + _context: &C, + ) -> Result { + Err(InvalidTransaction::BadProof.into()) } } @@ -323,11 +329,11 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = DisallowSigned; +/// The extension to the basic transaction logic. +pub type TxExtension = DisallowSigned; /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
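The `DisallowSigned` rewrite above is the only hand-written extension touched in this section: the old `SignedExtension::{validate, pre_dispatch}` pair becomes `TransactionExtension::{validate, prepare}`, and both hooks still fail unconditionally, so the shell runtime keeps rejecting every signed transaction. A minimal, hedged illustration (standalone, not part of the patch) of the validity error those hooks produce:

use sp_runtime::transaction_validity::{
	InvalidTransaction, TransactionValidity, TransactionValidityError,
};

// What the transaction pool or block builder observes for any signed shell extrinsic.
fn shell_signed_tx_outcome() -> TransactionValidity {
	Err(TransactionValidityError::Invalid(InvalidTransaction::BadProof))
}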
pub type Executive = frame_executive::Executive< Runtime, diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 08e5987d43a..bca6aca051f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -158,6 +158,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 0030287edb3..62065619e42 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -114,8 +114,8 @@ pub type BlockId = generic::BlockId; // Id used for identifying assets. pub type AssetId = u32; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -128,7 +128,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Migrations = ( pallet_balances::migration::MigrateToTrackInactive, @@ -419,6 +419,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } parameter_types! { @@ -608,6 +609,19 @@ impl pallet_collator_selection::Config for Runtime { type WeightInfo = (); } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(_id: u32) -> (u32, u32) { + unimplemented!("Penpal uses default weights"); + } + fn setup_balances_and_pool(_asset_id: u32, _account: AccountId) { + unimplemented!("Penpal uses default weights"); + } +} + impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -620,6 +634,9 @@ impl pallet_asset_tx_payment::Config for Runtime { >, AssetsToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_sudo::Config for Runtime { @@ -668,6 +685,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_session, SessionBench::] @@ -851,6 +869,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -867,6 +886,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime {} use cumulus_pallet_session_benchmarking::Pallet as SessionBench; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 42169e8949f..2023d9abf5d 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -126,6 +126,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 2b21a12c3ca..469f06a1aa6 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -267,6 +267,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -644,8 +645,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -657,7 +658,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 84a232e954f..a92e8a4c12a 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -136,6 +136,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "glutton-westend-runtime/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "people-rococo-runtime/runtime-benchmarks", diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 4835fb5192b..9e8f60783d4 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -14,9 +14,12 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -26,19 +29,27 @@ docify = "0.2.7" [dev-dependencies] sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } -sp-io = { path = "../../../substrate/primitives/io", default-features = false } cumulus-test-runtime = { path = "../../test/runtime" } [features] default = ["std"] +runtime-benchmarks = [ + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] std = [ "codec/std", "cumulus-primitives-core/std", "cumulus-primitives-proof-size-hostfunction/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", "scale-info/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs b/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs new file mode 100644 index 00000000000..2980d08271a --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/benchmarking.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarking setup for cumulus-primitives-storage-weight-reclaim + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{account, v2::*, BenchmarkError}; +use frame_support::pallet_prelude::DispatchClass; +use frame_system::{BlockWeight, RawOrigin}; +use sp_runtime::traits::{DispatchTransaction, Get}; +use sp_std::{ + marker::{Send, Sync}, + prelude::*, +}; + +/// Pallet we're benchmarking here. +pub struct Pallet(frame_system::Pallet); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn storage_weight_reclaim() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, 1000), DispatchClass::Normal); + }); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(0, 200)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let ext = StorageWeightReclaim::::new(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!(BlockWeight::::get().total().proof_size(), 700 + base_extrinsic.proof_size()); + + Ok(()) + } +} diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 5dddc92e395..4bb40176b78 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -29,12 +29,22 @@ use frame_support::{ use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, + }, transaction_validity::TransactionValidityError, DispatchResult, }; use sp_std::marker::PhantomData; +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight. @@ -43,7 +53,7 @@ const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// reclaim it computes the real consumed storage weight and refunds excess weight. 
/// /// # Example -#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)] +#[doc = docify::embed!("src/tests.rs", simple_reclaimer_example)] pub struct StorageWeightReclaimer { previous_remaining_proof_size: u64, previous_reported_proof_size: Option, @@ -119,42 +129,40 @@ impl core::fmt::Debug for StorageWeightReclaim { } } -impl SignedExtension for StorageWeightReclaim +impl TransactionExtensionBase for StorageWeightReclaim { + const IDENTIFIER: &'static str = "StorageWeightReclaim"; + type Implicit = (); +} + +impl TransactionExtension + for StorageWeightReclaim where T::RuntimeCall: Dispatchable, { - const IDENTIFIER: &'static str = "StorageWeightReclaim"; - - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = (); type Pre = Option; - fn additional_signed( - &self, - ) -> Result - { - Ok(()) - } - - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> Result { + _context: &Context, + ) -> Result { Ok(get_proof_size()) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - let Some(Some(pre_dispatch_proof_size)) = pre else { + let Some(pre_dispatch_proof_size) = pre else { return Ok(()); }; @@ -194,470 +202,6 @@ where }); Ok(()) } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::{ - assert_ok, - dispatch::DispatchClass, - weights::{Weight, WeightMeter}, - }; - use frame_system::{BlockWeight, CheckWeight}; - use sp_runtime::{AccountId32, BuildStorage}; - use sp_std::marker::PhantomData; - use sp_trie::proof_size_extension::ProofSizeExt; - - type Test = cumulus_test_runtime::Runtime; - const CALL: &::RuntimeCall = - &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { - pages: 0u64, - }); - const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 0; - - pub fn new_test_ext() -> sp_io::TestExternalities { - let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() - .build_storage() - .unwrap() - .into(); - ext - } - - struct TestRecorder { - return_values: Box<[usize]>, - counter: std::sync::atomic::AtomicUsize, - } - - impl TestRecorder { - fn new(values: &[usize]) -> Self { - TestRecorder { return_values: values.into(), counter: Default::default() } - } - } - - impl sp_trie::ProofSizeProvider for TestRecorder { - fn estimate_encoded_size(&self) -> usize { - let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); - self.return_values[counter] - } - } - - fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { - let mut test_ext = new_test_ext(); - let test_recorder = TestRecorder::new(proof_values); - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - test_ext - } - - fn set_current_storage_weight(new_weight: u64) { - BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); - }); - } - - #[test] - fn basic_refund() { - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); 
- - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect a refund of 400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 600); - }) - } - - #[test] - fn does_nothing_without_extension() { - let mut test_ext = new_test_ext(); - - // Proof size extension not registered - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, None); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1000); - }) - } - - #[test] - fn negative_refund_is_added_to_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // We expect no refund - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1100); - }) - } - - #[test] - fn test_zero_proof_size() { - let mut test_ext = setup_test_externalities(&[0, 0]); - - test_ext.execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 0); - }); - } - - #[test] - fn test_larger_pre_dispatch_proof_size() { - let mut test_ext = setup_test_externalities(&[300, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1300); - - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(300)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 800); - }); - } - - 
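// Editorial note (sketch, not part of the patch): the proof sizes asserted in these
// tests — kept unchanged in the new `tests.rs` added later in this series — all follow
// the same rule. The recorder values are the proof sizes observed before and after
// dispatch, and the extension adjusts the block's proof size by the gap between the
// benchmarked proof weight and what was really consumed. Roughly:
//
//   basic_refund:                       recorder [0, 100]   => consumed = 100
//                                       benchmarked = 500   => refund  = 400 => 1000 - 400 = 600
//   negative_refund_is_added_to_weight: recorder [100, 300] => consumed = 200
//                                       benchmarked = 100   => overrun = 100 => 1000 + 100 = 1100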
#[test] - fn test_incorporates_check_weight_unspent_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 900); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(BlockWeight::::get().total().proof_size(), 900); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(BlockWeight::::get().total().proof_size(), 1150); - }) - } - - #[test] - fn storage_size_reported_correctly() { - let mut test_ext = setup_test_externalities(&[1000]); - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(1000)); - }); - - let mut test_ext = new_test_ext(); - - let test_recorder = TestRecorder::new(&[0]); - - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(0)); - }); - } - - #[test] - fn storage_size_disabled_reported_correctly() { - let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), None); - }); - } - - #[test] - fn test_reclaim_helper() { - let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 500)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); - - remaining_weight_meter.consume(Weight::from_parts(0, 800)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); - }); - } - - #[test] - fn test_reclaim_helper_does_not_reclaim_negative() { - // Benchmarked weight does not change at all - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); - }); - - // Benchmarked weight increases less than storage proof consumes - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter 
= WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 0)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - }); - } - - /// Just here for doc purposes - fn get_benched_weight() -> Weight { - Weight::from_parts(0, 5) - } - - /// Just here for doc purposes - fn do_work() {} - - #[docify::export_content(simple_reclaimer_example)] - fn reclaim_with_weight_meter() { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); - - let benched_weight = get_benched_weight(); - - // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight - // for a piece of work from the weight meter. - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - - if remaining_weight_meter.try_consume(benched_weight).is_ok() { - // Perform some work that takes has `benched_weight` storage weight. - do_work(); - - // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - // We reclaimed 3 bytes of storage size! - assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(BlockWeight::::get().total().proof_size(), 10); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); - } - } - - #[test] - fn test_reclaim_helper_works_with_meter() { - // The node will report 12 - 10 = 2 consumed storage size between the calls. - let mut test_ext = setup_test_externalities(&[10, 12]); - - test_ext.execute_with(|| { - // Initial storage size is 10. - set_current_storage_weight(10); - reclaim_with_weight_meter(); - }); - } + impl_tx_ext_default!(T::RuntimeCall; Context; validate); } diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs new file mode 100644 index 00000000000..631827cf442 --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -0,0 +1,484 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
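This new `tests.rs` re-creates the module removed above with the same fixtures and expected values; the only mechanical change at each call site is the driver API. A condensed before/after sketch, reusing the module's `Test`, `ALICE`, `CALL`, `info`, `post_info` and `LEN` fixtures:

// Old `SignedExtension` flow:
let pre = StorageWeightReclaim::<Test>(PhantomData)
    .pre_dispatch(&ALICE, CALL, &info, LEN)
    .unwrap();
StorageWeightReclaim::<Test>::post_dispatch(Some(pre), &info, &post_info, LEN, &Ok(())).unwrap();

// New `TransactionExtension` flow, driven through the `DispatchTransaction` helper:
let (pre, _) = StorageWeightReclaim::<Test>(PhantomData)
    .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN)
    .unwrap();
StorageWeightReclaim::<Test>::post_dispatch(pre, &info, &post_info, LEN, &Ok(()), &()).unwrap();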
+ +use super::*; +use frame_support::{ + assert_ok, + dispatch::DispatchClass, + weights::{Weight, WeightMeter}, +}; +use frame_system::{BlockWeight, CheckWeight}; +use sp_runtime::{traits::DispatchTransaction, AccountId32, BuildStorage}; +use sp_std::marker::PhantomData; +use sp_trie::proof_size_extension::ProofSizeExt; + +type Test = cumulus_test_runtime::Runtime; +const CALL: &::RuntimeCall = + &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +const LEN: usize = 0; + +fn new_test_ext() -> sp_io::TestExternalities { + let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() + .build_storage() + .unwrap() + .into(); + ext +} + +struct TestRecorder { + return_values: Box<[usize]>, + counter: std::sync::atomic::AtomicUsize, +} + +impl TestRecorder { + fn new(values: &[usize]) -> Self { + TestRecorder { return_values: values.into(), counter: Default::default() } + } +} + +impl sp_trie::ProofSizeProvider for TestRecorder { + fn estimate_encoded_size(&self) -> usize { + let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); + self.return_values[counter] + } +} + +fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { + let mut test_ext = new_test_ext(); + let test_recorder = TestRecorder::new(proof_values); + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + test_ext +} + +fn set_current_storage_weight(new_weight: u64) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); + }); +} + +#[test] +fn basic_refund() { + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + // We expect a refund of 400 + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 600); + }) +} + +#[test] +fn does_nothing_without_extension() { + let mut test_ext = new_test_ext(); + + // Proof size extension not registered + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, None); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + }) +} + +#[test] +fn negative_refund_is_added_to_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 100 + let 
info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // We expect no refund + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1100); + }) +} + +#[test] +fn test_zero_proof_size() { + let mut test_ext = setup_test_externalities(&[0, 0]); + + test_ext.execute_with(|| { + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 0); + }); +} + +#[test] +fn test_larger_pre_dispatch_proof_size() { + let mut test_ext = setup_test_externalities(&[300, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1300); + + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(300)); + + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 800); + }); +} + +#[test] +fn test_incorporates_check_weight_unspent_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 900); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + + assert_eq!(BlockWeight::::get().total().proof_size(), 900); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + assert_ok!(StorageWeightReclaim::::post_dispatch( + pre, + &info, + &post_info, + LEN, + &Ok(()), + &() + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch((), &info, &post_info, 0, &Ok(()), &())); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1150); + }) +} + +#[test] +fn storage_size_reported_correctly() { + let mut test_ext = setup_test_externalities(&[1000]); + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(1000)); + }); + + let mut test_ext = new_test_ext(); + + let test_recorder = TestRecorder::new(&[0]); + + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(0)); + }); +} + +#[test] +fn storage_size_disabled_reported_correctly() { + let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), None); + }); +} + +#[test] +fn test_reclaim_helper() { + let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 500)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); + + remaining_weight_meter.consume(Weight::from_parts(0, 800)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); + }); +} + +#[test] +fn test_reclaim_helper_does_not_reclaim_negative() { + // Benchmarked weight does not change at all + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); + }); + + // Benchmarked weight increases less than storage proof consumes + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 0)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + }); +} + +/// Just here for doc purposes +fn get_benched_weight() -> Weight { + Weight::from_parts(0, 5) +} + +/// Just here for doc purposes +fn do_work() {} + +#[docify::export_content(simple_reclaimer_example)] +fn reclaim_with_weight_meter() { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); + + let benched_weight = get_benched_weight(); + + // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight + // for a piece of work from the weight meter. + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + + if remaining_weight_meter.try_consume(benched_weight).is_ok() { + // Perform some work that takes has `benched_weight` storage weight. 
+ do_work(); + + // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + // We reclaimed 3 bytes of storage size! + assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); + assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); + } +} + +#[test] +fn test_reclaim_helper_works_with_meter() { + // The node will report 12 - 10 = 2 consumed storage size between the calls. + let mut test_ext = setup_test_externalities(&[10, 12]); + + test_ext.execute_with(|| { + // Initial storage size is 10. + set_current_storage_weight(10); + reclaim_with_weight_meter(); + }); +} diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 028733ce235..9dfa6ecb351 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -46,9 +46,11 @@ cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-w [features] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-test-service/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index c46f4da7f67..9239ff0f23e 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,7 +19,7 @@ mod block_builder; use codec::{Decode, Encode}; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, + Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, UncheckedExtrinsic, VERSION, }; use sc_executor::HeapAllocStrategy; @@ -125,7 +125,7 @@ impl DefaultTestClientBuilderExt for TestClientBuilder { /// Create an unsigned extrinsic from a runtime call. 
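Bare (previously "unsigned") extrinsic construction changes shape in two places in this patch: the constructor used just below becomes `new_bare`, and the struct-literal call sites in `bench_utils.rs` further down swap `signature: None` for the new `preamble` field. A sketch of both forms, with `call` standing in for any runtime call value:

// Constructor form:
let via_ctor = UncheckedExtrinsic::new_bare(call.clone().into());

// Struct-literal form:
let via_literal = cumulus_test_runtime::UncheckedExtrinsic {
    preamble: sp_runtime::generic::Preamble::Bare,
    function: call.into(),
};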
pub fn generate_unsigned(function: impl Into) -> UncheckedExtrinsic { - UncheckedExtrinsic::new_unsigned(function.into()) + UncheckedExtrinsic::new_bare(function.into()) } /// Create a signed extrinsic from a runtime call and sign @@ -143,7 +143,7 @@ pub fn generate_extrinsic_with_pair( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -152,13 +152,14 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let function = function.into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); @@ -167,7 +168,7 @@ pub fn generate_extrinsic_with_pair( function, origin.public().into(), Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 5ccec8983e9..154948a24b4 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -246,6 +246,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -322,8 +323,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckGenesis, @@ -335,7 +336,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -346,7 +347,7 @@ pub type Executive = frame_executive::Executive< TestOnRuntimeUpgrade, >; /// The payload being signed in transactions. 
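Note that the implicit data handed to `SignedPayload::from_raw` in `generate_extrinsic_with_pair` above does not change: for a tuple of extensions, `Implicit` is simply the tuple of each member's `Implicit` (formerly `AdditionalSigned`), in order. An annotated sketch of that third argument; the entries marked as elided are a guess at the extensions hidden by the hunk context above:

let implicit = (
    (),                   // CheckNonZeroSender
    VERSION.spec_version, // CheckSpecVersion: the runtime spec version
    genesis_block,        // CheckGenesis: the genesis hash
    current_block_hash,   // mortality check (elided above): the era anchor hash
    (),                   // nonce check (elided above)
    (),                   // CheckWeight
    (),                   // ChargeTransactionPayment
    (),                   // StorageWeightReclaim
);
let raw_payload = SignedPayload::from_raw(function.clone(), tx_ext.clone(), implicit);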
-pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub struct TestOnRuntimeUpgrade; diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 27273f4e0a8..39e8aeba433 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -104,10 +104,12 @@ substrate-test-utils = { path = "../../../substrate/test-utils" } runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-storage-weight-reclaim/runtime-benchmarks", "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 4ace894b392..15128b23787 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -69,7 +69,7 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { let timestamp = best_number as u64 * cumulus_test_runtime::MinimumPeriod::get(); cumulus_test_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: timestamp, }), @@ -102,7 +102,7 @@ pub fn extrinsic_set_validation_data( }; cumulus_test_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: cumulus_test_runtime::RuntimeCall::ParachainSystem( cumulus_pallet_parachain_system::Call::set_validation_data { data }, ), diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 3554a383f21..f05dc5f180f 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -883,7 +883,7 @@ pub fn construct_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -895,10 +895,11 @@ pub fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); @@ -906,7 +907,7 @@ pub fn construct_extrinsic( function, caller.public().into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 8c8568a228f..be1af500038 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -56,7 +56,7 @@ //! version_and_signed, //! from_address, //! signature, -//! signed_extensions_extra, +//! transaction_extensions_extra, //! ) //! ``` //! @@ -90,31 +90,31 @@ //! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the //! 
variants there are the types of signature that can be provided. //! -//! ### signed_extensions_extra +//! ### transaction_extensions_extra //! //! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of -//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the -//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about -//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! the [_transaction extensions_][sp_runtime::traits::TransactionExtension], and are configured by +//! the fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about +//! transaction extensions [here][crate::reference_docs::transaction_extensions]. //! -//! When it comes to constructing an extrinsic, each signed extension has two things that we are -//! interested in here: +//! When it comes to constructing an extrinsic, each transaction extension has two things that we +//! are interested in here: //! -//! - The actual SCALE encoding of the signed extension type itself; this is what will form our -//! `signed_extensions_extra` bytes. -//! - An `AdditionalSigned` type. This is SCALE encoded into the `signed_extensions_additional` data -//! of the _signed payload_ (see below). +//! - The actual SCALE encoding of the transaction extension type itself; this is what will form our +//! `transaction_extensions_extra` bytes. +//! - An `Implicit` type. This is SCALE encoded into the `transaction_extensions_implicit` data of +//! the _signed payload_ (see below). //! //! Either (or both) of these can encode to zero bytes. //! -//! Each chain configures the set of signed extensions that it uses in its runtime configuration. -//! At the time of writing, Polkadot configures them +//! Each chain configures the set of transaction extensions that it uses in its runtime +//! configuration. At the time of writing, Polkadot configures them //! [here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). -//! Some of the common signed extensions are defined -//! [here][frame::deps::frame_system#signed-extensions]. +//! Some of the common transaction extensions are defined +//! [here][frame::deps::frame_system#transaction-extensions]. //! -//! Information about exactly which signed extensions are present on a chain and in what order is -//! also a part of the metadata for the chain. For V15 metadata, it can be +//! Information about exactly which transaction extensions are present on a chain and in what order +//! is also a part of the metadata for the chain. For V15 metadata, it can be //! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. //! //! ## call_data @@ -163,8 +163,8 @@ //! ```text //! signed_payload = concat( //! call_data, -//! signed_extensions_extra, -//! signed_extensions_additional, +//! transaction_extensions_extra, +//! transaction_extensions_implicit, //! ) //! //! if length(signed_payload) > 256 { @@ -172,16 +172,16 @@ //! } //! ``` //! -//! The bytes representing `call_data` and `signed_extensions_extra` can be obtained as described -//! above. `signed_extensions_additional` is constructed by SCALE encoding the -//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each -//! signed extension that the chain is using, in order. +//! 
The bytes representing `call_data` and `transaction_extensions_extra` can be obtained as +//! descibed above. `transaction_extensions_implicit` is constructed by SCALE encoding the +//! ["implicit" data][sp_runtime::traits::TransactionExtensionBase::Implicit] for each +//! transaction extension that the chain is using, in order. //! //! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in //! length using a Blake2 256bit hasher. //! //! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload -//! for us, given `call_data` and a tuple of signed extensions. +//! for us, given `call_data` and a tuple of transaction extensions. //! //! # Example Encoding //! @@ -192,11 +192,12 @@ #[docify::export] pub mod call_data { use parity_scale_codec::{Decode, Encode}; + use sp_runtime::{traits::Dispatchable, DispatchResultWithInfo}; // The outer enum composes calls within // different pallets together. We have two // pallets, "PalletA" and "PalletB". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum Call { #[codec(index = 0)] PalletA(PalletACall), @@ -207,23 +208,33 @@ pub mod call_data { // An inner enum represents the calls within // a specific pallet. "PalletA" has one call, // "Foo". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletACall { #[codec(index = 0)] Foo(String), } - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletBCall { #[codec(index = 0)] Bar(String), } + + impl Dispatchable for Call { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) + } + } } #[docify::export] pub mod encoding_example { use super::call_data::{Call, PalletACall}; - use crate::reference_docs::signed_extensions::signed_extensions_example; + use crate::reference_docs::transaction_extensions::transaction_extensions_example; use parity_scale_codec::Encode; use sp_core::crypto::AccountId32; use sp_keyring::sr25519::Keyring; @@ -232,34 +243,40 @@ pub mod encoding_example { MultiAddress, MultiSignature, }; - // Define some signed extensions to use. We'll use a couple of examples - // from the signed extensions reference doc. - type SignedExtensions = - (signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload); + // Define some transaction extensions to use. We'll use a couple of examples + // from the transaction extensions reference doc. + type TransactionExtensions = ( + transaction_extensions_example::AddToPayload, + transaction_extensions_example::AddToSignaturePayload, + ); // We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set // the address and signature type to those used on Polkadot, use our custom - // `Call` type, and use our custom set of `SignedExtensions`. - type Extrinsic = - UncheckedExtrinsic, Call, MultiSignature, SignedExtensions>; + // `Call` type, and use our custom set of `TransactionExtensions`. + type Extrinsic = UncheckedExtrinsic< + MultiAddress, + Call, + MultiSignature, + TransactionExtensions, + >; pub fn encode_demo_extrinsic() -> Vec { // The "from" address will be our Alice dev account. let from_address = MultiAddress::::Id(Keyring::Alice.to_account_id()); - // We provide some values for our expected signed extensions. 
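The signing rule spelled out in the reference doc above — concatenate the three byte strings, then hash with Blake2-256 only if the result exceeds 256 bytes — is small enough to sketch directly. `sp_runtime::generic::SignedPayload` implements it for you; the function below only illustrates the rule, given the already-encoded pieces:

use sp_core::hashing::blake2_256;

fn signed_payload_bytes(
    call_data: &[u8],
    extensions_extra: &[u8],
    extensions_implicit: &[u8],
) -> Vec<u8> {
    let mut payload =
        Vec::with_capacity(call_data.len() + extensions_extra.len() + extensions_implicit.len());
    payload.extend_from_slice(call_data);
    payload.extend_from_slice(extensions_extra);
    payload.extend_from_slice(extensions_implicit);
    // Payloads longer than 256 bytes are replaced by their 32-byte Blake2-256 hash
    // before being signed.
    if payload.len() > 256 {
        blake2_256(&payload).to_vec()
    } else {
        payload
    }
}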
- let signed_extensions = ( - signed_extensions_example::AddToPayload(1), - signed_extensions_example::AddToSignaturePayload, + // We provide some values for our expected transaction extensions. + let transaction_extensions = ( + transaction_extensions_example::AddToPayload(1), + transaction_extensions_example::AddToSignaturePayload, ); // Construct our call data: let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); // The signed payload. This takes care of encoding the call_data, - // signed_extensions_extra and signed_extensions_additional, and hashing + // transaction_extensions_extra and transaction_extensions_implicit, and hashing // the result if it's > 256 bytes: - let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + let signed_payload = SignedPayload::new(call_data.clone(), transaction_extensions.clone()); // Sign the signed payload with our Alice dev account's private key, // and wrap the signature into the expected type: @@ -269,7 +286,7 @@ pub mod encoding_example { }; // Now, we can build and encode our extrinsic: - let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + let ext = Extrinsic::new_signed(call_data, from_address, signature, transaction_extensions); let encoded_ext = ext.encode(); encoded_ext diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs index b5838b79e51..883892729d1 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_types.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -130,7 +130,7 @@ //! * [`crate::reference_docs::frame_origin`] explores further details about the usage of //! `RuntimeOrigin`. //! * [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an -//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! extrinsic. See [`crate::reference_docs::transaction_extensions`] for more information. //! * See the documentation of [`construct_runtime`]. //! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). //! diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 0bd204e4b6a..3fd815d2a15 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -39,9 +39,9 @@ pub mod runtime_vs_smart_contract; /// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks. pub mod extrinsic_encoding; -/// Learn about the signed extensions that form a part of extrinsics. +/// Learn about the transaction extensions that form a part of extrinsics. // TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 -pub mod signed_extensions; +pub mod transaction_extensions; /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs deleted file mode 100644 index 28b1426536b..00000000000 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic -//! format with custom data that can be checked by the runtime. -//! -//! # Example -//! -//! 
Defining a couple of very simple signed extensions looks like the following: -#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] - -#[docify::export] -pub mod signed_extensions_example { - use parity_scale_codec::{Decode, Encode}; - use scale_info::TypeInfo; - use sp_runtime::traits::SignedExtension; - - // This doesn't actually check anything, but simply allows - // some arbitrary `u32` to be added to the extrinsic payload - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToPayload(pub u32); - - impl SignedExtension for AddToPayload { - const IDENTIFIER: &'static str = "AddToPayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } - - // This is the opposite; nothing will be added to the extrinsic payload, - // but the AdditionalSigned type (`1234u32`) will be added to the - // payload to be signed. - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToSignaturePayload; - - impl SignedExtension for AddToSignaturePayload { - const IDENTIFIER: &'static str = "AddToSignaturePayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(1234) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } -} diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs new file mode 100644 index 00000000000..4f96d665d9b --- /dev/null +++ b/docs/sdk/src/reference_docs/transaction_extensions.rs @@ -0,0 +1,57 @@ +//! Transaction extensions are, briefly, a means for different chains to extend the "basic" +//! extrinsic format with custom data that can be checked by the runtime. +//! +//! # Example +//! +//! Defining a couple of very simple transaction extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/transaction_extensions.rs", transaction_extensions_example)] + +#[docify::export] +pub mod transaction_extensions_example { + use parity_scale_codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension, TransactionExtensionBase}, + TransactionValidityError, + }; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl TransactionExtensionBase for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type Implicit = (); + } + + impl TransactionExtension for AddToPayload { + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; (); validate prepare); + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the Implicit type (`1234u32`) will be added to the + // payload to be signed. 
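For orientation, a small sketch of how such example extensions are typically aggregated (the `TxExtension` and `DemoExtrinsic` names here are hypothetical): the extensions are composed into a tuple, and that tuple becomes the extension type carried by the extrinsic:

    type TxExtension = (AddToPayload, AddToSignaturePayload);
    type DemoExtrinsic<Address, Call, Signature> =
        sp_runtime::generic::UncheckedExtrinsic<Address, Call, Signature, TxExtension>;

The encoding example in `extrinsic_encoding.rs` above wires these two example extensions up in exactly this way.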
+ #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl TransactionExtensionBase for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type Implicit = u32; + + fn implicit(&self) -> Result { + Ok(1234) + } + } + + impl TransactionExtension for AddToSignaturePayload { + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; (); validate prepare); + } +} diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 734dcbdeb44..180de70ad6c 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -196,6 +196,7 @@ runtime-benchmarks = [ "pallet-babe/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index 400daf1aee3..29acc5c3ca8 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -189,7 +189,7 @@ fn westend_sign_call( use sp_core::Pair; use westend_runtime as runtime; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -201,11 +201,12 @@ fn westend_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -223,7 +224,7 @@ fn westend_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) .into() } @@ -241,7 +242,7 @@ fn rococo_sign_call( use rococo_runtime as runtime; use sp_core::Pair; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -253,11 +254,12 @@ fn rococo_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -275,7 +277,7 @@ fn rococo_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature.clone()), - extra, + tx_ext, ) .into() } diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index e7892abcd87..02b227e6f34 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -71,6 +71,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index 
ca904bae28d..1876595c7b4 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_service::{ Error, FullClient, IsParachainNode, NewFull, OverseerGen, PrometheusConfig, }; use polkadot_test_runtime::{ - ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, + ParasCall, ParasSudoWrapperCall, Runtime, SignedPayload, SudoCall, TxExtension, UncheckedExtrinsic, VERSION, }; @@ -382,7 +382,7 @@ pub fn construct_extrinsic( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -391,10 +391,11 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); + ) + .into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), VERSION.spec_version, @@ -411,7 +412,7 @@ pub fn construct_extrinsic( function.clone(), polkadot_test_runtime::Address::Id(caller.public().into()), polkadot_primitives::Signature::Sr25519(signature.clone()), - extra.clone(), + tx_ext.clone(), ) } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index eae5d4fb2ef..c2205938295 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -132,6 +132,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "primitives/runtime-benchmarks", diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 68f42914e44..57a95b22af3 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -29,7 +29,11 @@ use scale_info::TypeInfo; use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; use sp_runtime::{ - traits::{CheckedSub, DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{ + AsSystemOriginSigner, CheckedSub, DispatchInfoOf, Dispatchable, TransactionExtension, + TransactionExtensionBase, Zero, + }, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, @@ -50,6 +54,7 @@ pub trait WeightInfo { fn claim_attest() -> Weight; fn attest() -> Weight; fn move_claim() -> Weight; + fn prevalidate_attests() -> Weight; } pub struct TestWeightInfo; @@ -69,6 +74,9 @@ impl WeightInfo for TestWeightInfo { fn move_claim() -> Weight { Weight::zero() } + fn prevalidate_attests() -> Weight { + Weight::zero() + } } /// The kind of statement an account needs to make for a claim to be valid. @@ -84,7 +92,7 @@ pub enum StatementKind { impl StatementKind { /// Convert this to the (English) statement it represents. 
- fn to_text(self) -> &'static [u8] { + pub fn to_text(self) -> &'static [u8] { match self { StatementKind::Regular => &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ @@ -166,6 +174,13 @@ pub mod pallet { #[pallet::without_storage_info] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `PrevalidateAttests` transaction extension. + pub trait BenchmarkHelperTrait { + /// `Call` to be used when benchmarking the transaction extension. + fn default_call_and_info() -> (RuntimeCall, DispatchInfo); + } + /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { @@ -176,6 +191,12 @@ pub mod pallet { type Prefix: Get<&'static [u8]>; type MoveClaimOrigin: EnsureOrigin; type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::RuntimeCall, + DispatchInfoOf, + >; } #[pallet::event] @@ -402,7 +423,7 @@ pub mod pallet { /// Attest to a statement, needed to finalize the claims process. /// /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a - /// `SignedExtension`. + /// `TransactionExtension`. /// /// Unsigned Validation: /// A call to attest is deemed valid if the sender has a `Preclaim` registered @@ -612,46 +633,46 @@ impl PrevalidateAttests where ::RuntimeCall: IsSubType>, { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for PrevalidateAttests +impl TransactionExtensionBase for PrevalidateAttests where ::RuntimeCall: IsSubType>, { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "PrevalidateAttests"; + type Implicit = (); +} - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } +impl TransactionExtension for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, +{ + type Pre = (); + type Val = (); // // The weight of this logic is included in the `attest` dispatchable. 
// fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; if let Some(local_call) = call.is_sub_type() { if let Call::attest { statement: attested_statement } = local_call { let signer = Preclaims::::get(who) @@ -662,8 +683,9 @@ where } } } - Ok(ValidTransaction::default()) + Ok((ValidTransaction::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(any(test, feature = "runtime-benchmarks"))] @@ -696,7 +718,7 @@ mod secp_utils { } #[cfg(test)] -mod tests { +pub(super) mod tests { use super::*; use hex_literal::hex; use secp_utils::*; @@ -714,8 +736,11 @@ mod tests { }; use pallet_balances; use sp_runtime::{ - traits::Identity, transaction_validity::TransactionLongevity, BuildStorage, - DispatchError::BadOrigin, TokenError, + traits::{DispatchTransaction, Identity}, + transaction_validity::TransactionLongevity, + BuildStorage, + DispatchError::BadOrigin, + TokenError, }; type Block = frame_system::mocking::MockBlock; @@ -790,6 +815,19 @@ mod tests { type Prefix = Prefix; type MoveClaimOrigin = frame_system::EnsureSignedBy; type WeightInfo = TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + } + + #[cfg(feature = "runtime-benchmarks")] + impl BenchmarkHelperTrait> for () { + fn default_call_and_info() -> (RuntimeCall, DispatchInfoOf) { + let call = RuntimeCall::Claims(crate::claims::Call::attest { + statement: StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } } fn alice() -> libsecp256k1::SecretKey { @@ -1071,8 +1109,8 @@ mod tests { }); let di = c.get_dispatch_info(); assert_eq!(di.pays_fee, Pays::No); - let r = p.validate(&42, &c, &di, 20); - assert_eq!(r, TransactionValidity::Ok(ValidTransaction::default())); + let r = p.validate_only(Some(42).into(), &c, &di, 20); + assert_eq!(r.unwrap().0, ValidTransaction::default()); }); } @@ -1084,13 +1122,13 @@ mod tests { statement: StatementKind::Regular.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&42, &c, &di, 20); + let r = p.validate_only(Some(42).into(), &c, &di, 20); assert!(r.is_err()); let c = RuntimeCall::Claims(ClaimsCall::attest { statement: StatementKind::Saft.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&69, &c, &di, 20); + let r = p.validate_only(Some(69).into(), &c, &di, 20); assert!(r.is_err()); }); } @@ -1444,14 +1482,17 @@ mod tests { } #[cfg(feature = "runtime-benchmarks")] -mod benchmarking { +pub(super) mod benchmarking { use super::*; use crate::claims::Call; use frame_benchmarking::{account, benchmarks}; use frame_support::traits::UnfilteredDispatchable; use frame_system::RawOrigin; use secp_utils::*; - use sp_runtime::{traits::ValidateUnsigned, DispatchResult}; + use sp_runtime::{ + traits::{DispatchTransaction, ValidateUnsigned}, + DispatchResult, + }; const SEED: u32 = 0; @@ -1487,6 +1528,11 @@ mod benchmarking { } benchmarks! 
{ + where_clause { where ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner + Clone, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + } + // Benchmark `claim` including `validate_unsigned` logic. claim { let c = MAX_CLAIMS; @@ -1665,6 +1711,38 @@ mod benchmarking { } } + prevalidate_attests { + let c = MAX_CLAIMS; + + for i in 0 .. c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + + let ext = PrevalidateAttests::::new(); + let (call, info) = T::BenchmarkHelper::default_call_and_info(); + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + let signature = sig::(&secret_key, &account.encode(), statement.to_text()); + super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + }: { + assert!(ext.test_run( + RawOrigin::Signed(account).into(), + &call, + &info, + 0, + |_| { + Ok(Default::default()) + } + ).unwrap().is_ok()); + } + impl_benchmark_test_suite!( Pallet, crate::claims::tests::new_test_ext(), diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 3dc59cc1728..61ca1635ac9 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -246,6 +246,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs index e2aa4a6cab7..f7dc2f19316 100644 --- a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! 
WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 408_659, 450_716 - /// Average: 417_412 - /// Median: 411_177 - /// Std-Dev: 12242.31 + /// Min, Max: 440_142, 476_907 + /// Average: 450_240 + /// Median: 448_633 + /// Std-Dev: 7301.18 /// /// Percentiles nanoseconds: - /// 99th: 445_142 - /// 95th: 442_275 - /// 75th: 414_217 + /// 99th: 470_733 + /// 95th: 465_082 + /// 75th: 452_536 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(417_412), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(450_240), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs index adce840ebbc..000cee8a237 100644 --- a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 97_574, 100_119 - /// Average: 98_236 - /// Median: 98_179 - /// Std-Dev: 394.9 + /// Min, Max: 92_961, 94_143 + /// Average: 93_369 + /// Median: 93_331 + /// Std-Dev: 217.39 /// /// Percentiles nanoseconds: - /// 99th: 99_893 - /// 95th: 98_850 - /// 75th: 98_318 + /// 99th: 93_848 + /// 95th: 93_691 + /// 75th: 93_514 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(98_236), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(93_369), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 96e8c4e0979..ff54c777670 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -199,6 +199,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -329,6 +330,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -591,7 +593,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -603,16 +605,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -633,12 +636,32 @@ parameter_types! { pub Prefix: &'static [u8] = b"Pay ROCs to the Rococo account:"; } +#[cfg(feature = "runtime-benchmarks")] +pub struct ClaimsHelper; + +#[cfg(feature = "runtime-benchmarks")] +use frame_support::dispatch::DispatchInfo; + +#[cfg(feature = "runtime-benchmarks")] +impl claims::BenchmarkHelperTrait for ClaimsHelper { + fn default_call_and_info() -> (RuntimeCall, DispatchInfo) { + use frame_support::dispatch::GetDispatchInfo; + let call = RuntimeCall::Claims(claims::Call::attest { + statement: claims::StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } +} + impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; type MoveClaimOrigin = EnsureRoot; type WeightInfo = weights::runtime_common_claims::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = ClaimsHelper; } parameter_types! { @@ -1447,8 +1470,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. 
pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1461,7 +1484,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations that will run on the next runtime upgrade. /// @@ -1675,7 +1698,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) @@ -1746,7 +1769,9 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2240,6 +2265,7 @@ sp_api::impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -2260,6 +2286,7 @@ sp_api::impl_runtime_apis! { use frame_support::traits::WhitelistedStorageKeys; use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; diff --git a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs index dfba0cfc4aa..0f68a5c6fb3 100644 --- a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs +++ b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_benchmarking::baseline` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_benchmarking::baseline // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/frame_benchmarking_baseline.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,8 +55,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(175_233, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(199_481, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -61,8 +64,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_285, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(197_821, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -70,8 +73,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(184_720, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(200_942, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -79,16 +82,16 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 152_000 picoseconds. - Weight::from_parts(177_496, 0) + // Minimum execution time: 170_000 picoseconds. + Weight::from_parts(196_906, 0) .saturating_add(Weight::from_parts(0, 0)) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 19_907_376_000 picoseconds. - Weight::from_parts(19_988_727_000, 0) + // Minimum execution time: 23_346_876_000 picoseconds. + Weight::from_parts(23_363_744_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 100]`. @@ -96,10 +99,10 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 198_000 picoseconds. - Weight::from_parts(228_000, 0) + // Minimum execution time: 201_000 picoseconds. + Weight::from_parts(219_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 20_467 - .saturating_add(Weight::from_parts(47_443_635, 0).saturating_mul(i.into())) + // Standard Error: 14_372 + .saturating_add(Weight::from_parts(45_375_800, 0).saturating_mul(i.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/frame_system.rs b/polkadot/runtime/rococo/src/weights/frame_system.rs index 2e49483dcc6..1742a761ca7 100644 --- a/polkadot/runtime/rococo/src/weights/frame_system.rs +++ b/polkadot/runtime/rococo/src/weights/frame_system.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_system` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_system // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,91 +55,91 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_283_000 picoseconds. - Weight::from_parts(2_305_000, 0) + // Minimum execution time: 1_541_000 picoseconds. + Weight::from_parts(2_581_470, 0) .saturating_add(Weight::from_parts(0, 0)) // Standard Error: 0 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_581_000, 0) + // Minimum execution time: 5_060_000 picoseconds. + Weight::from_parts(5_167_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_408, 0).saturating_mul(b.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_696, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_010_000 picoseconds. - Weight::from_parts(4_112_000, 0) + // Minimum execution time: 2_649_000 picoseconds. 
+ Weight::from_parts(2_909_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 80_405_511_000 picoseconds. - Weight::from_parts(83_066_478_000, 0) + // Minimum execution time: 88_417_540_000 picoseconds. + Weight::from_parts(91_809_291_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_210_000 picoseconds. - Weight::from_parts(2_247_000, 0) + // Minimum execution time: 1_538_000 picoseconds. + Weight::from_parts(1_589_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_058 - .saturating_add(Weight::from_parts(673_943, 0).saturating_mul(i.into())) + // Standard Error: 1_740 + .saturating_add(Weight::from_parts(730_941, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_125_000 picoseconds. - Weight::from_parts(2_154_000, 0) + // Minimum execution time: 1_567_000 picoseconds. + Weight::from_parts(1_750_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 816 - .saturating_add(Weight::from_parts(491_194, 0).saturating_mul(i.into())) + // Standard Error: 835 + .saturating_add(Weight::from_parts(543_218, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `129 + p * (69 ±0)` - // Estimated: `125 + p * (70 ±0)` - // Minimum execution time: 4_002_000 picoseconds. 
- Weight::from_parts(4_145_000, 0) - .saturating_add(Weight::from_parts(0, 125)) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(1_014_971, 0).saturating_mul(p.into())) + // Measured: `80 + p * (69 ±0)` + // Estimated: `83 + p * (70 ±0)` + // Minimum execution time: 3_412_000 picoseconds. + Weight::from_parts(3_448_000, 0) + .saturating_add(Weight::from_parts(0, 83)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(1_142_347, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -147,8 +150,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) + // Minimum execution time: 9_178_000 picoseconds. + Weight::from_parts(9_780_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +165,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) + // Minimum execution time: 94_523_563_000 picoseconds. + Weight::from_parts(96_983_131_000, 0) .saturating_add(Weight::from_parts(0, 1518)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..40520591f53 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,123 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=frame_system_extensions +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_262_000 picoseconds. + Weight::from_parts(3_497_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(552_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 4_847_000 picoseconds. + Weight::from_parts(5_091_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 388_000 picoseconds. + Weight::from_parts(421_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 378_000 picoseconds. + Weight::from_parts(440_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_402_000 picoseconds. 
+ Weight::from_parts(3_627_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 7328dca9393..128a3083a9c 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -16,6 +16,7 @@ //! A list of the different weight modules for our runtime. pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_balances_balances; pub mod pallet_balances_nis_counterpart_balances; @@ -36,6 +37,7 @@ pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs index da2d1958cef..56b1e2cbc57 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs @@ -16,25 +16,28 @@ //! Autogenerated weights for `pallet_asset_rate` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-03, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_asset_rate // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,39 +50,39 @@ use core::marker::PhantomData; /// Weight functions for `pallet_asset_rate`. pub struct WeightInfo(PhantomData); impl pallet_asset_rate::WeightInfo for WeightInfo { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `4702` - // Minimum execution time: 143_000_000 picoseconds. - Weight::from_parts(155_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `142` + // Estimated: `4703` + // Minimum execution time: 10_277_000 picoseconds. 
+ Weight::from_parts(10_487_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 156_000_000 picoseconds. - Weight::from_parts(172_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 10_917_000 picoseconds. + Weight::from_parts(11_249_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 150_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 11_332_000 picoseconds. + Weight::from_parts(11_866_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs index 1b0ae1eeece..3fa54f2c369 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_127_000 picoseconds. - Weight::from_parts(45_099_000, 0) + // Minimum execution time: 45_336_000 picoseconds. + Weight::from_parts(46_189_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 34_265_000 picoseconds. - Weight::from_parts(35_083_000, 0) + // Minimum execution time: 34_880_000 picoseconds. + Weight::from_parts(35_770_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -78,8 +80,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 12_189_000 picoseconds. - Weight::from_parts(12_655_000, 0) + // Minimum execution time: 12_904_000 picoseconds. + Weight::from_parts(13_260_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_910_000 picoseconds. - Weight::from_parts(17_474_000, 0) + // Minimum execution time: 17_669_000 picoseconds. + Weight::from_parts(18_228_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -102,8 +104,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 45_212_000 picoseconds. - Weight::from_parts(46_320_000, 0) + // Minimum execution time: 46_492_000 picoseconds. + Weight::from_parts(47_639_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -114,8 +116,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 42_500_000 picoseconds. - Weight::from_parts(43_991_000, 0) + // Minimum execution time: 44_342_000 picoseconds. + Weight::from_parts(45_144_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -126,8 +128,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 15_197_000 picoseconds. 
- Weight::from_parts(15_749_000, 0) + // Minimum execution time: 15_260_000 picoseconds. + Weight::from_parts(15_775_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -140,11 +142,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_414_000 picoseconds. - Weight::from_parts(14_685_000, 0) + // Minimum execution time: 14_703_000 picoseconds. + Weight::from_parts(14_950_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 7_918 - .saturating_add(Weight::from_parts(13_095_420, 0).saturating_mul(u.into())) + // Standard Error: 7_665 + .saturating_add(Weight::from_parts(13_335_803, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -153,8 +155,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_239_000 picoseconds. - Weight::from_parts(5_617_000, 0) + // Minimum execution time: 5_506_000 picoseconds. + Weight::from_parts(5_753_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs index 6cca9b9320a..1852ba6c2c4 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -56,8 +58,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 41_978_000 picoseconds. - Weight::from_parts(42_989_000, 0) + // Minimum execution time: 42_443_000 picoseconds. 
+ Weight::from_parts(43_250_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -70,8 +72,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 32_250_000 picoseconds. - Weight::from_parts(33_074_000, 0) + // Minimum execution time: 32_417_000 picoseconds. + Weight::from_parts(33_247_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -82,8 +84,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3577` - // Minimum execution time: 9_906_000 picoseconds. - Weight::from_parts(10_397_000, 0) + // Minimum execution time: 10_091_000 picoseconds. + Weight::from_parts(10_426_000, 0) .saturating_add(Weight::from_parts(0, 3577)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -96,8 +98,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `277` // Estimated: `3593` - // Minimum execution time: 16_298_000 picoseconds. - Weight::from_parts(17_115_000, 0) + // Minimum execution time: 16_546_000 picoseconds. + Weight::from_parts(17_259_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -110,8 +112,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `6196` - // Minimum execution time: 43_283_000 picoseconds. - Weight::from_parts(44_033_000, 0) + // Minimum execution time: 44_322_000 picoseconds. + Weight::from_parts(45_319_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) @@ -124,8 +126,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6164` - // Minimum execution time: 40_564_000 picoseconds. - Weight::from_parts(41_597_000, 0) + // Minimum execution time: 40_852_000 picoseconds. + Weight::from_parts(42_205_000, 0) .saturating_add(Weight::from_parts(0, 6164)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -138,8 +140,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `277` // Estimated: `3593` - // Minimum execution time: 15_018_000 picoseconds. - Weight::from_parts(15_532_000, 0) + // Minimum execution time: 15_050_000 picoseconds. + Weight::from_parts(15_813_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -154,11 +156,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (256 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_470_000 picoseconds. - Weight::from_parts(14_828_000, 0) + // Minimum execution time: 14_830_000 picoseconds. 
+ Weight::from_parts(15_061_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 15_515 - .saturating_add(Weight::from_parts(14_505_553, 0).saturating_mul(u.into())) + // Standard Error: 16_072 + .saturating_add(Weight::from_parts(14_981_430, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -167,8 +169,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_277_000 picoseconds. - Weight::from_parts(5_628_000, 0) + // Minimum execution time: 5_344_000 picoseconds. + Weight::from_parts(5_735_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs index 38d3645316f..8f8be5f2386 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,118 +50,181 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bounties`. 
pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3593` - // Minimum execution time: 28_907_000 picoseconds. - Weight::from_parts(31_356_074, 0) + // Minimum execution time: 21_772_000 picoseconds. + Weight::from_parts(22_861_341, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(606, 0).saturating_mul(d.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(721, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `3642` + // Minimum execution time: 11_218_000 picoseconds. + Weight::from_parts(11_796_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `322` + // Estimated: `3642` + // Minimum execution time: 10_959_000 picoseconds. 
+ Weight::from_parts(11_658_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `498` + // Estimated: `3642` + // Minimum execution time: 37_419_000 picoseconds. + Weight::from_parts(38_362_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `494` + // Estimated: `3642` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_661_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `400` + // Estimated: `3642` + // Minimum execution time: 16_067_000 picoseconds. + Weight::from_parts(16_865_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `764` + // Estimated: `8799` + // Minimum execution time: 101_153_000 picoseconds. + Weight::from_parts(102_480_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `482` + // Measured: `444` // Estimated: `3642` - // Minimum execution time: 46_020_000 picoseconds. - Weight::from_parts(46_711_000, 0) + // Minimum execution time: 38_838_000 picoseconds. + Weight::from_parts(39_549_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `680` + // Estimated: `6196` + // Minimum execution time: 68_592_000 picoseconds. 
+ Weight::from_parts(70_727_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `358` + // Estimated: `3642` + // Minimum execution time: 11_272_000 picoseconds. + Weight::from_parts(11_592_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. - fn spend_funds(_b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1887` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(2_405_233, 0) + // Measured: `0 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 2_844_000 picoseconds. + Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 9_467 + .saturating_add(Weight::from_parts(32_326_595, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs index e8c798d45e7..47ae3a5c90d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_child_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,69 +50,153 @@ use core::marker::PhantomData; /// Weight functions for `pallet_child_bounties`. pub struct WeightInfo(PhantomData); impl pallet_child_bounties::WeightInfo for WeightInfo { + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. - fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `540` + // Estimated: `6196` + // Minimum execution time: 57_964_000 picoseconds. + Weight::from_parts(59_559_565, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(697, 0).saturating_mul(d.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `594` + // Estimated: `3642` + // Minimum execution time: 17_527_000 picoseconds. 
+ Weight::from_parts(18_257_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 29_354_000 picoseconds. + Weight::from_parts(30_629_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 40_643_000 picoseconds. + Weight::from_parts(42_072_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `637` + // Estimated: `3642` + // Minimum execution time: 18_616_000 picoseconds. 
+ Weight::from_parts(19_316_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `576` + // Estimated: `8799` + // Minimum execution time: 96_376_000 picoseconds. + Weight::from_parts(98_476_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `840` + // Estimated: `6196` + // Minimum execution time: 64_640_000 picoseconds. 
+ Weight::from_parts(66_174_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `1027` + // Estimated: `8799` + // Minimum execution time: 78_159_000 picoseconds. + Weight::from_parts(79_820_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs index ba505737f1b..5d92c158df4 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs @@ -16,17 +16,17 @@ //! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot // benchmark // pallet -// --chain=kusama-dev +// --chain=rococo-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -36,8 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,144 +50,152 @@ use core::marker::PhantomData; /// Weight functions for `pallet_conviction_voting`. 
pub struct WeightInfo(PhantomData); impl pallet_conviction_voting::WeightInfo for WeightInfo { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13445` + // Measured: `13407` // Estimated: `42428` - // Minimum execution time: 151_077_000 picoseconds. - Weight::from_parts(165_283_000, 0) + // Minimum execution time: 128_378_000 picoseconds. 
+ Weight::from_parts(131_028_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `14166` + // Measured: `14128` // Estimated: `83866` - // Minimum execution time: 232_420_000 picoseconds. - Weight::from_parts(244_439_000, 0) + // Minimum execution time: 155_379_000 picoseconds. 
+ Weight::from_parts(161_597_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: // Measured: `13918` // Estimated: `83866` - // Minimum execution time: 205_017_000 picoseconds. - Weight::from_parts(216_594_000, 0) + // Minimum execution time: 130_885_000 picoseconds. + Weight::from_parts(138_080_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `13004` + // Measured: `13005` // Estimated: `30706` - // Minimum execution time: 84_226_000 picoseconds. - Weight::from_parts(91_255_000, 0) + // Minimum execution time: 71_743_000 picoseconds. 
+ Weight::from_parts(75_170_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `29640 + r * (365 ±0)` + // Measured: `29602 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 78_708_000 picoseconds. - Weight::from_parts(2_053_488_615, 0) + // Minimum execution time: 58_504_000 picoseconds. 
+ Weight::from_parts(814_301_018, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 179_271 - .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + // Standard Error: 59_961 + .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(45)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `29555 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 45_232_000 picoseconds. - Weight::from_parts(2_045_021_014, 0) + // Minimum execution time: 34_970_000 picoseconds. 
+ Weight::from_parts(771_155_804, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 185_130 - .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into())) + // Standard Error: 57_795 + .saturating_add(Weight::from_parts(19_781_645, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(43)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `12218` + // Measured: `12180` // Estimated: `30706` - // Minimum execution time: 116_446_000 picoseconds. - Weight::from_parts(124_043_000, 0) + // Minimum execution time: 89_648_000 picoseconds. + Weight::from_parts(97_144_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_identity.rs b/polkadot/runtime/rococo/src/weights/pallet_identity.rs index b334e21ea03..6df16351f2c 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_identity.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_identity.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_identity // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,290 +50,291 @@ use core::marker::PhantomData; /// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_290_000 picoseconds. - Weight::from_parts(12_664_362, 0) + // Minimum execution time: 7_673_000 picoseconds. + Weight::from_parts(8_351_866, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_347 - .saturating_add(Weight::from_parts(88_179, 0).saturating_mul(r.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(79_198, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 31_373_000 picoseconds. - Weight::from_parts(30_435_545, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 111_646_000 picoseconds. 
+ Weight::from_parts(113_254_991, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_611 + .saturating_add(Weight::from_parts(162_119, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_251_000 picoseconds. - Weight::from_parts(22_039_210, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 40_779 - .saturating_add(Weight::from_parts(2_898_525, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 8_010_000 picoseconds. + Weight::from_parts(19_868_412, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_018 + .saturating_add(Weight::from_parts(3_115_007, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_329_000 picoseconds. 
- Weight::from_parts(24_055_061, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 3_428 - .saturating_add(Weight::from_parts(1_130_604, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_111_000 picoseconds. + Weight::from_parts(19_482_392, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(1_305_890, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 53_365_000 picoseconds. - Weight::from_parts(35_391_422, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_353 - .saturating_add(Weight::from_parts(1_074_019, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 54_107_000 picoseconds. + Weight::from_parts(56_347_715, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 10_944 + .saturating_add(Weight::from_parts(191_321, 0).saturating_mul(r.into())) + // Standard Error: 2_135 + .saturating_add(Weight::from_parts(1_295_872, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(31_745_585, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(83_822, 0).saturating_mul(r.into())) - + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 75_780_000 picoseconds. + Weight::from_parts(76_869_773, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_456 + .saturating_add(Weight::from_parts(135_316, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_609_000 picoseconds. - Weight::from_parts(28_572_602, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_528 - .saturating_add(Weight::from_parts(85_593, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 75_769_000 picoseconds. + Weight::from_parts(76_805_143, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_598 + .saturating_add(Weight::from_parts(84_593, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_173_888, 0) + // Minimum execution time: 5_357_000 picoseconds. + Weight::from_parts(5_732_132, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_569 - .saturating_add(Weight::from_parts(72_367, 0).saturating_mul(r.into())) + // Standard Error: 927 + .saturating_add(Weight::from_parts(70_832, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_708_000 picoseconds. - Weight::from_parts(8_091_149, 0) + // Minimum execution time: 5_484_000 picoseconds. 
+ Weight::from_parts(5_892_704, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(87_993, 0).saturating_mul(r.into())) + // Standard Error: 947 + .saturating_add(Weight::from_parts(71_231, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_601_000 picoseconds. - Weight::from_parts(8_038_414, 0) + // Minimum execution time: 5_310_000 picoseconds. + Weight::from_parts(5_766_651, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_041 - .saturating_add(Weight::from_parts(82_588, 0).saturating_mul(r.into())) + // Standard Error: 916 + .saturating_add(Weight::from_parts(74_776, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 23_114_000 picoseconds. - Weight::from_parts(22_076_548, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_881 - .saturating_add(Weight::from_parts(109_812, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 98_200_000 picoseconds. 
+ Weight::from_parts(100_105_482, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_152 + .saturating_add(Weight::from_parts(58_906, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 70_007_000 picoseconds. - Weight::from_parts(50_186_495, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 6_533 - .saturating_add(Weight::from_parts(15_486, 0).saturating_mul(r.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(1_085_117, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 64_647_000 picoseconds. 
+ Weight::from_parts(68_877_027, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 9_965 + .saturating_add(Weight::from_parts(135_044, 0).saturating_mul(r.into())) + // Standard Error: 1_944 + .saturating_add(Weight::from_parts(1_388_151, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 28_453_000 picoseconds. - Weight::from_parts(33_165_934, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(65_401, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 23_550_000 picoseconds. + Weight::from_parts(29_439_842, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 1_453 + .saturating_add(Weight::from_parts(96_324, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_846_000 picoseconds. - Weight::from_parts(14_710_284, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 496 - .saturating_add(Weight::from_parts(19_539, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 13_704_000 picoseconds. 
+ Weight::from_parts(15_241_441, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 498 + .saturating_add(Weight::from_parts(40_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_183_000 picoseconds. - Weight::from_parts(35_296_731, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(52_028, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_310_000 picoseconds. + Weight::from_parts(31_712_666, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 967 + .saturating_add(Weight::from_parts(81_250, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 24_941_000 picoseconds. - Weight::from_parts(27_433_059, 0) + // Minimum execution time: 22_906_000 picoseconds. 
+ Weight::from_parts(24_638_729, 0)
 .saturating_add(Weight::from_parts(0, 6723))
- // Standard Error: 856
- .saturating_add(Weight::from_parts(57_463, 0).saturating_mul(s.into()))
+ // Standard Error: 645
+ .saturating_add(Weight::from_parts(75_121, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(3))
 .saturating_add(T::DbWeight::get().writes(2))
 }
@@ -340,90 +344,93 @@ impl<T: frame_system::Config> pallet_identity::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 13_873_000 picoseconds.
- Weight::from_parts(13_873_000, 0)
+ // Minimum execution time: 6_056_000 picoseconds.
+ Weight::from_parts(6_349_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
- /// Storage: `Identity::UsernameAuthorities` (r:0 w:1)
+ /// Storage: `Identity::UsernameAuthorities` (r:1 w:1)
 /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 fn remove_username_authority() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 10_653_000 picoseconds.
- Weight::from_parts(10_653_000, 0)
- .saturating_add(Weight::from_parts(0, 0))
+ // Measured: `80`
+ // Estimated: `3517`
+ // Minimum execution time: 9_003_000 picoseconds.
+ Weight::from_parts(9_276_000, 0)
+ .saturating_add(Weight::from_parts(0, 3517))
+ .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
 }
 /// Storage: `Identity::UsernameAuthorities` (r:1 w:1)
 /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 /// Storage: `Identity::AccountOfUsername` (r:1 w:1)
- /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::PendingUsernames` (r:1 w:0)
+ /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`)
 /// Storage: `Identity::IdentityOf` (r:1 w:1)
 /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
 fn set_username_for() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `80`
 // Estimated: `11037`
- // Minimum execution time: 75_928_000 picoseconds.
- Weight::from_parts(75_928_000, 0)
+ // Minimum execution time: 64_724_000 picoseconds.
+ Weight::from_parts(66_597_000, 0) .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) + // Minimum execution time: 19_538_000 picoseconds. + Weight::from_parts(20_204_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_000_000 picoseconds. + Weight::from_parts(19_354_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) + // Minimum execution time: 15_298_000 picoseconds. 
+ Weight::from_parts(15_760_000, 0)
 .saturating_add(Weight::from_parts(0, 11037))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
 }
 /// Storage: `Identity::AccountOfUsername` (r:1 w:1)
- /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+ /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`)
 /// Storage: `Identity::IdentityOf` (r:1 w:0)
 /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
 fn remove_dangling_username() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `126`
+ // Measured: `98`
 // Estimated: `11037`
- // Minimum execution time: 15_997_000 picoseconds.
- Weight::from_parts(15_997_000, 0)
+ // Minimum execution time: 10_829_000 picoseconds.
+ Weight::from_parts(11_113_000, 0)
 .saturating_add(Weight::from_parts(0, 11037))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/weights/pallet_indices.rs b/polkadot/runtime/rococo/src/weights/pallet_indices.rs
index 99ffd3210ed..434db97d4a7 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_indices.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_indices.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `pallet_indices`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=pallet_indices
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,66 +50,66 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_indices`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_indices::WeightInfo for WeightInfo<T> {
- /// Storage: Indices Accounts (r:1 w:1)
- /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
+ /// Storage: `Indices::Accounts` (r:1 w:1)
+ /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`)
 fn claim() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `142`
+ // Measured: `4`
 // Estimated: `3534`
- // Minimum execution time: 25_107_000 picoseconds.
- Weight::from_parts(25_655_000, 0)
+ // Minimum execution time: 18_092_000 picoseconds.
+ Weight::from_parts(18_533_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(36_521_000, 0) + // Minimum execution time: 31_616_000 picoseconds. + Weight::from_parts(32_556_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 25_915_000 picoseconds. - Weight::from_parts(26_220_000, 0) + // Minimum execution time: 19_593_000 picoseconds. + Weight::from_parts(20_100_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 28_232_000 picoseconds. - Weight::from_parts(28_845_000, 0) + // Minimum execution time: 21_429_000 picoseconds. + Weight::from_parts(22_146_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_754_000, 0) + // Minimum execution time: 20_425_000 picoseconds. 
+ Weight::from_parts(21_023_000, 0)
 .saturating_add(Weight::from_parts(0, 3534))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs
index e1e360d374a..6ebfcd060b6 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `pallet_message_queue`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=pallet_message_queue
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,150 +50,149 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_message_queue`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> {
- /// Storage: MessageQueue ServiceHead (r:1 w:0)
- /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen)
- /// Storage: MessageQueue BookStateFor (r:2 w:2)
- /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen)
+ /// Storage: `MessageQueue::ServiceHead` (r:1 w:0)
+ /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`)
+ /// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
+ /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`)
 fn ready_ring_knit() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `248`
+ // Measured: `281`
 // Estimated: `6050`
- // Minimum execution time: 12_106_000 picoseconds.
- Weight::from_parts(12_387_000, 0)
+ // Minimum execution time: 12_830_000 picoseconds.
+ Weight::from_parts(13_476_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_227_000 picoseconds. - Weight::from_parts(11_616_000, 0) + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(11_902_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 5_052_000 picoseconds. - Weight::from_parts(5_216_000, 0) + // Minimum execution time: 3_801_000 picoseconds. + Weight::from_parts(3_943_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_522_000 picoseconds. - Weight::from_parts(6_794_000, 0) + // Minimum execution time: 5_517_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(7_083_000, 0) + // Minimum execution time: 5_870_000 picoseconds. 
+ Weight::from_parts(6_028_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 28_445_000 picoseconds. - Weight::from_parts(28_659_000, 0) + // Minimum execution time: 80_681_000 picoseconds. + Weight::from_parts(81_818_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `149` + // Measured: `220` // Estimated: `3520` - // Minimum execution time: 7_224_000 picoseconds. - Weight::from_parts(7_441_000, 0) + // Minimum execution time: 8_641_000 picoseconds. 
+ Weight::from_parts(8_995_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 45_211_000 picoseconds. - Weight::from_parts(45_505_000, 0) + // Minimum execution time: 38_473_000 picoseconds. 
+ Weight::from_parts(39_831_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 52_346_000 picoseconds. - Weight::from_parts(52_745_000, 0) + // Minimum execution time: 48_717_000 picoseconds. 
+ Weight::from_parts(49_724_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 72_567_000 picoseconds. - Weight::from_parts(73_300_000, 0) + // Minimum execution time: 72_718_000 picoseconds. + Weight::from_parts(74_081_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs index a4f33fe198c..f1b81759ece 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=pallet_multisig
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -52,110 +55,110 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_475_000 picoseconds.
- Weight::from_parts(11_904_745, 0)
+ // Minimum execution time: 12_023_000 picoseconds.
+ Weight::from_parts(12_643_116, 0)
 .saturating_add(Weight::from_parts(0, 0))
- // Standard Error: 1
- .saturating_add(Weight::from_parts(492, 0).saturating_mul(z.into()))
+ // Standard Error: 3
+ .saturating_add(Weight::from_parts(582, 0).saturating_mul(z.into()))
 }
- /// Storage: Multisig Multisigs (r:1 w:1)
- /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+ /// Storage: `Multisig::Multisigs` (r:1 w:1)
+ /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 /// The range of component `s` is `[2, 100]`.
 /// The range of component `z` is `[0, 10000]`.
 fn as_multi_create(s: u32, z: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `193 + s * (2 ±0)`
+ // Measured: `229 + s * (2 ±0)`
 // Estimated: `6811`
- // Minimum execution time: 38_857_000 picoseconds.
- Weight::from_parts(33_611_791, 0)
+ // Minimum execution time: 39_339_000 picoseconds.
+ Weight::from_parts(27_243_033, 0)
 .saturating_add(Weight::from_parts(0, 6811))
- // Standard Error: 400
- .saturating_add(Weight::from_parts(59_263, 0).saturating_mul(s.into()))
- // Standard Error: 3
- .saturating_add(Weight::from_parts(1_211, 0).saturating_mul(z.into()))
+ // Standard Error: 1_319
+ .saturating_add(Weight::from_parts(142_212, 0).saturating_mul(s.into()))
+ // Standard Error: 12
+ .saturating_add(Weight::from_parts(1_592, 0).saturating_mul(z.into()))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
 }
- /// Storage: Multisig Multisigs (r:1 w:1)
- /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+ /// Storage: `Multisig::Multisigs` (r:1 w:1)
+ /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 /// The range of component `s` is `[3, 100]`.
 /// The range of component `z` is `[0, 10000]`.
 fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `211`
+ // Measured: `248`
 // Estimated: `6811`
- // Minimum execution time: 25_715_000 picoseconds.
- Weight::from_parts(20_607_294, 0)
+ // Minimum execution time: 27_647_000 picoseconds.
+ Weight::from_parts(15_828_725, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 285 - .saturating_add(Weight::from_parts(58_225, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(z.into())) + // Standard Error: 908 + .saturating_add(Weight::from_parts(130_880, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_532, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + s * (33 ±0)` + // Measured: `354 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 43_751_000 picoseconds. - Weight::from_parts(37_398_513, 0) + // Minimum execution time: 46_971_000 picoseconds. + Weight::from_parts(32_150_393, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(70_904, 0).saturating_mul(s.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_235, 0).saturating_mul(z.into())) + // Standard Error: 1_129 + .saturating_add(Weight::from_parts(154_796, 0).saturating_mul(s.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_603, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 31_278_000 picoseconds. - Weight::from_parts(32_075_573, 0) + // Minimum execution time: 24_947_000 picoseconds. 
+ Weight::from_parts(26_497_183, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(62_018, 0).saturating_mul(s.into())) + // Standard Error: 1_615 + .saturating_add(Weight::from_parts(147_071, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 18_178_000 picoseconds. - Weight::from_parts(18_649_867, 0) + // Minimum execution time: 13_897_000 picoseconds. + Weight::from_parts(14_828_339, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 293 - .saturating_add(Weight::from_parts(56_475, 0).saturating_mul(s.into())) + // Standard Error: 1_136 + .saturating_add(Weight::from_parts(133_925, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383 + s * (1 ±0)` + // Measured: `420 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 32_265_000 picoseconds. - Weight::from_parts(32_984_014, 0) + // Minimum execution time: 28_984_000 picoseconds. + Weight::from_parts(29_853_232, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(59_934, 0).saturating_mul(s.into())) + // Standard Error: 650 + .saturating_add(Weight::from_parts(113_440, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_nis.rs b/polkadot/runtime/rococo/src/weights/pallet_nis.rs index 35dad482129..38b41f3a8e2 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_nis.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_nis.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=pallet_nis
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,202 +50,186 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_nis`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_nis::WeightInfo for WeightInfo<T> {
- /// Storage: Nis Queues (r:1 w:1)
- /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
- /// Storage: Balances Holds (r:1 w:1)
- /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen)
- /// Storage: Nis QueueTotals (r:1 w:1)
- /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+ /// Storage: `Nis::Queues` (r:1 w:1)
+ /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Holds` (r:1 w:1)
+ /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
+ /// Storage: `Nis::QueueTotals` (r:1 w:1)
+ /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 /// The range of component `l` is `[0, 999]`.
 fn place_bid(l: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `6209 + l * (48 ±0)`
 // Estimated: `51487`
- // Minimum execution time: 44_704_000 picoseconds.
- Weight::from_parts(44_933_886, 0)
+ // Minimum execution time: 39_592_000 picoseconds.
+ Weight::from_parts(38_234_037, 0)
 .saturating_add(Weight::from_parts(0, 51487))
- // Standard Error: 712
- .saturating_add(Weight::from_parts(71_570, 0).saturating_mul(l.into()))
+ // Standard Error: 1_237
+ .saturating_add(Weight::from_parts(88_816, 0).saturating_mul(l.into()))
 .saturating_add(T::DbWeight::get().reads(3))
 .saturating_add(T::DbWeight::get().writes(3))
 }
- /// Storage: Nis Queues (r:1 w:1)
- /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
- /// Storage: Balances Holds (r:1 w:1)
- /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen)
- /// Storage: Nis QueueTotals (r:1 w:1)
- /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+ /// Storage: `Nis::Queues` (r:1 w:1)
+ /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Holds` (r:1 w:1)
+ /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
+ /// Storage: `Nis::QueueTotals` (r:1 w:1)
+ /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 fn place_bid_max() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `54211`
 // Estimated: `51487`
- // Minimum execution time: 126_544_000 picoseconds.
- Weight::from_parts(128_271_000, 0)
+ // Minimum execution time: 134_847_000 picoseconds.
+ Weight::from_parts(139_510_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(42_214_261, 0) + // Minimum execution time: 43_330_000 picoseconds. + Weight::from_parts(35_097_881, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 732 - .saturating_add(Weight::from_parts(87_277, 0).saturating_mul(l.into())) + // Standard Error: 1_119 + .saturating_add(Weight::from_parts(73_640, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 38_031_000 picoseconds. - Weight::from_parts(38_441_000, 0) + // Minimum execution time: 29_989_000 picoseconds. 
+ Weight::from_parts(30_865_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `387` // Estimated: `3593` - // Minimum execution time: 69_269_000 picoseconds. - Weight::from_parts(70_000_000, 0) + // Minimum execution time: 58_114_000 picoseconds. 
+ Weight::from_parts(59_540_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `543` // Estimated: `3593` - // Minimum execution time: 85_763_000 picoseconds. - Weight::from_parts(86_707_000, 0) + // Minimum execution time: 75_780_000 picoseconds. 
+ Weight::from_parts(77_097_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 47_336_000 picoseconds. - Weight::from_parts(47_623_000, 0) + // Minimum execution time: 46_133_000 picoseconds. 
+ Weight::from_parts(47_250_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `604` + // Measured: `488` // Estimated: `3593` - // Minimum execution time: 90_972_000 picoseconds. - Weight::from_parts(92_074_000, 0) + // Minimum execution time: 77_916_000 picoseconds. 
+ Weight::from_parts(79_427_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_469_000 picoseconds. - Weight::from_parts(21_983_000, 0) + // Minimum execution time: 22_992_000 picoseconds. + Weight::from_parts(24_112_000, 0) .saturating_add(Weight::from_parts(0, 7487)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_912_000 picoseconds. - Weight::from_parts(5_013_000, 0) + // Minimum execution time: 3_856_000 picoseconds. + Weight::from_parts(4_125_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_048_000 picoseconds. - Weight::from_parts(7_278_000, 0) + // Minimum execution time: 4_344_000 picoseconds. + Weight::from_parts(4_545_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs index e051ebd5bba..7a2b77b84d8 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_preimage` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +50,219 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_000_000, 3593) - // Standard Error: 13_720 - .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } - - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3556` - // Minimum execution time: 31_040_000 picoseconds. 
- Weight::from_parts(31_236_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `114` + // Estimated: `3568` + // Minimum execution time: 40_363_000 picoseconds. + Weight::from_parts(41_052_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 18_025_000 picoseconds. - Weight::from_parts(18_264_000, 0) + // Minimum execution time: 14_570_000 picoseconds. + Weight::from_parts(14_890_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_364, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 17_122_000 picoseconds. - Weight::from_parts(17_332_000, 0) + // Minimum execution time: 13_933_000 picoseconds. 
+ Weight::from_parts(14_290_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_968, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_349, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 38_218_000 picoseconds. - Weight::from_parts(39_841_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `315` + // Estimated: `3568` + // Minimum execution time: 54_373_000 picoseconds. + Weight::from_parts(58_205_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 23_217_000 picoseconds. - Weight::from_parts(24_246_000, 0) + // Minimum execution time: 24_267_000 picoseconds. 
+ Weight::from_parts(27_063_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `260` // Estimated: `3556` - // Minimum execution time: 21_032_000 picoseconds. - Weight::from_parts(21_844_000, 0) + // Minimum execution time: 25_569_000 picoseconds. + Weight::from_parts(27_895_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_501_000, 0) + // Minimum execution time: 14_182_000 picoseconds. + Weight::from_parts(16_098_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `114` // Estimated: `3556` - // Minimum execution time: 14_874_000 picoseconds. - Weight::from_parts(15_380_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_493_000, 0) + // Minimum execution time: 9_577_000 picoseconds. + Weight::from_parts(10_146_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 21_772_000 picoseconds. - Weight::from_parts(22_554_000, 0) + // Minimum execution time: 21_003_000 picoseconds. + Weight::from_parts(23_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_115_000 picoseconds. - Weight::from_parts(10_452_000, 0) + // Minimum execution time: 9_507_000 picoseconds. 
+ Weight::from_parts(10_013_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_031_000 picoseconds. - Weight::from_parts(10_310_000, 0) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(10_055_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1023 w:1023) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (227 ±0)` + // Estimated: `990 + n * (2603 ±0)` + // Minimum execution time: 48_846_000 picoseconds. + Weight::from_parts(49_378_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 38_493 + .saturating_add(Weight::from_parts(47_418_285, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs index d9737a85c05..c9202593095 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_proxy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,176 @@ use core::marker::PhantomData; /// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_956_000 picoseconds. - Weight::from_parts(16_300_358, 0) + // Minimum execution time: 11_267_000 picoseconds. + Weight::from_parts(11_798_007, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 652 - .saturating_add(Weight::from_parts(30_807, 0).saturating_mul(p.into())) + // Standard Error: 858 + .saturating_add(Weight::from_parts(43_735, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (68 ±0) + p * (37 ±0)` + // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(37_858_207, 0) + // Minimum execution time: 32_791_000 picoseconds. 
+ Weight::from_parts(32_776_904, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_868 - .saturating_add(Weight::from_parts(148_967, 0).saturating_mul(a.into())) - // Standard Error: 1_930 - .saturating_add(Weight::from_parts(13_017, 0).saturating_mul(p.into())) + // Standard Error: 2_382 + .saturating_add(Weight::from_parts(143_857, 0).saturating_mul(a.into())) + // Standard Error: 2_461 + .saturating_add(Weight::from_parts(40_024, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_526_588, 0) + // Minimum execution time: 21_831_000 picoseconds. + Weight::from_parts(22_479_938, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(131_157, 0).saturating_mul(a.into())) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(146_532, 0).saturating_mul(a.into())) + // Standard Error: 1_796 + .saturating_add(Weight::from_parts(7_499, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, _p: u32, ) -> Weight { + fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_377_000 picoseconds. - Weight::from_parts(25_464_033, 0) + // Minimum execution time: 21_776_000 picoseconds. 
+ Weight::from_parts(22_762_843, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_116 - .saturating_add(Weight::from_parts(130_722, 0).saturating_mul(a.into())) + // Standard Error: 1_402 + .saturating_add(Weight::from_parts(137_512, 0).saturating_mul(a.into())) + // Standard Error: 1_449 + .saturating_add(Weight::from_parts(3_645, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `486 + a * (68 ±0) + p * (37 ±0)` + // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_202_000 picoseconds. - Weight::from_parts(34_610_079, 0) + // Minimum execution time: 29_108_000 picoseconds. + Weight::from_parts(29_508_910, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_234 - .saturating_add(Weight::from_parts(134_197, 0).saturating_mul(a.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(15_970, 0).saturating_mul(p.into())) + // Standard Error: 2_268 + .saturating_add(Weight::from_parts(144_770, 0).saturating_mul(a.into())) + // Standard Error: 2_343 + .saturating_add(Weight::from_parts(25_851, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(25_984_867, 0) + // Minimum execution time: 18_942_000 picoseconds. 
+ Weight::from_parts(19_518_812, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 893 - .saturating_add(Weight::from_parts(51_868, 0).saturating_mul(p.into())) + // Standard Error: 1_078 + .saturating_add(Weight::from_parts(46_147, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(26_283_445, 0) + // Minimum execution time: 18_993_000 picoseconds. + Weight::from_parts(19_871_741, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_442 - .saturating_add(Weight::from_parts(53_504, 0).saturating_mul(p.into())) + // Standard Error: 1_883 + .saturating_add(Weight::from_parts(46_033, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_083_000 picoseconds. - Weight::from_parts(22_688_835, 0) + // Minimum execution time: 17_849_000 picoseconds. + Weight::from_parts(18_776_170, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 994 - .saturating_add(Weight::from_parts(32_994, 0).saturating_mul(p.into())) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(27_960, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `239` + // Measured: `101` // Estimated: `4706` - // Minimum execution time: 27_042_000 picoseconds. - Weight::from_parts(27_624_587, 0) + // Minimum execution time: 20_049_000 picoseconds. 
+ Weight::from_parts(20_881_515, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 671 - .saturating_add(Weight::from_parts(5_888, 0).saturating_mul(p.into())) + // Standard Error: 952 + .saturating_add(Weight::from_parts(5_970, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + p * (37 ±0)` + // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_396_000 picoseconds. - Weight::from_parts(24_003_080, 0) + // Minimum execution time: 18_528_000 picoseconds. + Weight::from_parts(19_384_189, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 684 - .saturating_add(Weight::from_parts(29_878, 0).saturating_mul(p.into())) + // Standard Error: 1_106 + .saturating_add(Weight::from_parts(35_698, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs index ce9d5fcc0c7..fa2decb1671 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_ranked_collective // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_ranked_collective -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `42` // Estimated: `3507` - // Minimum execution time: 13_480_000 picoseconds. - Weight::from_parts(13_786_000, 0) + // Minimum execution time: 13_428_000 picoseconds. 
+ Weight::from_parts(14_019_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -79,11 +81,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `516 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 28_771_000 picoseconds. - Weight::from_parts(29_256_825, 0) + // Minimum execution time: 28_566_000 picoseconds. + Weight::from_parts(29_346_952, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 21_594 - .saturating_add(Weight::from_parts(14_649_527, 0).saturating_mul(r.into())) + // Standard Error: 21_068 + .saturating_add(Weight::from_parts(14_471_237, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -103,11 +105,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `214 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 16_117_000 picoseconds. - Weight::from_parts(16_978_453, 0) + // Minimum execution time: 16_161_000 picoseconds. + Weight::from_parts(16_981_334, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 4_511 - .saturating_add(Weight::from_parts(324_261, 0).saturating_mul(r.into())) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(313_386, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -124,11 +126,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `532 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 28_995_000 picoseconds. - Weight::from_parts(31_343_215, 0) + // Minimum execution time: 28_406_000 picoseconds. + Weight::from_parts(31_178_557, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 16_438 - .saturating_add(Weight::from_parts(637_462, 0).saturating_mul(r.into())) + // Standard Error: 17_737 + .saturating_add(Weight::from_parts(627_757, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -140,15 +142,17 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `83866` - // Minimum execution time: 38_820_000 picoseconds. - Weight::from_parts(40_240_000, 0) + // Minimum execution time: 41_164_000 picoseconds. 
+ Weight::from_parts(42_163_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -161,11 +165,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `400 + n * (50 ±0)` // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 12_972_000 picoseconds. - Weight::from_parts(15_829_333, 0) + // Minimum execution time: 13_183_000 picoseconds. + Weight::from_parts(15_604_064, 0) .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 1_754 - .saturating_add(Weight::from_parts(1_116_520, 0).saturating_mul(n.into())) + // Standard Error: 2_018 + .saturating_add(Weight::from_parts(1_101_088, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -183,8 +187,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `337` // Estimated: `6048` - // Minimum execution time: 44_601_000 picoseconds. - Weight::from_parts(45_714_000, 0) + // Minimum execution time: 43_603_000 picoseconds. + Weight::from_parts(44_809_000, 0) .saturating_add(Weight::from_parts(0, 6048)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(10)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_recovery.rs b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs new file mode 100644 index 00000000000..ed79aa2b1f1 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs @@ -0,0 +1,186 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_recovery` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_recovery +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_recovery`. +pub struct WeightInfo(PhantomData); +impl pallet_recovery::WeightInfo for WeightInfo { + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn as_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 7_899_000 picoseconds. + Weight::from_parts(8_205_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn set_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_258_000 picoseconds. + Weight::from_parts(6_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn create_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3816` + // Minimum execution time: 19_369_000 picoseconds. + Weight::from_parts(20_185_132, 0) + .saturating_add(Weight::from_parts(0, 3816)) + // Standard Error: 4_275 + .saturating_add(Weight::from_parts(78_024, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + fn initiate_recovery() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `3854` + // Minimum execution time: 22_425_000 picoseconds. + Weight::from_parts(23_171_000, 0) + .saturating_add(Weight::from_parts(0, 3854)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. 
+ fn vouch_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `294 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 17_308_000 picoseconds. + Weight::from_parts(18_118_782, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_309 + .saturating_add(Weight::from_parts(126_278, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn claim_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `326 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_755_000 picoseconds. + Weight::from_parts(21_821_713, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_550 + .saturating_add(Weight::from_parts(101_916, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn close_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `447 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 29_957_000 picoseconds. + Weight::from_parts(31_010_309, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 5_913 + .saturating_add(Weight::from_parts(110_070, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn remove_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `204 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 24_430_000 picoseconds. + Weight::from_parts(24_462_856, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 13_646 + .saturating_add(Weight::from_parts(507_715, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn cancel_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 9_686_000 picoseconds. 
+ Weight::from_parts(10_071_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs index 96f172230e1..6dfcea2b832 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,10 +60,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `327` + // Measured: `292` // Estimated: `42428` - // Minimum execution time: 29_909_000 picoseconds. - Weight::from_parts(30_645_000, 0) + // Minimum execution time: 24_053_000 picoseconds. + Weight::from_parts(25_121_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -71,15 +72,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 54_405_000 picoseconds. - Weight::from_parts(55_583_000, 0) + // Minimum execution time: 45_064_000 picoseconds. 
+ Weight::from_parts(46_112_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -89,15 +92,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2076` + // Measured: `2041` // Estimated: `42428` - // Minimum execution time: 110_477_000 picoseconds. - Weight::from_parts(119_187_000, 0) + // Minimum execution time: 94_146_000 picoseconds. + Weight::from_parts(98_587_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -107,15 +112,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2117` + // Measured: `2082` // Estimated: `42428` - // Minimum execution time: 111_467_000 picoseconds. - Weight::from_parts(117_758_000, 0) + // Minimum execution time: 93_002_000 picoseconds. 
+ Weight::from_parts(96_924_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -125,15 +132,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `739` // Estimated: `83866` - // Minimum execution time: 191_135_000 picoseconds. - Weight::from_parts(210_535_000, 0) + // Minimum execution time: 160_918_000 picoseconds. + Weight::from_parts(175_603_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -143,24 +152,26 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `639` + // Measured: `604` // Estimated: `83866` - // Minimum execution time: 67_168_000 picoseconds. - Weight::from_parts(68_895_000, 0) + // Minimum execution time: 55_253_000 picoseconds. + Weight::from_parts(56_488_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `317` // Estimated: `4365` - // Minimum execution time: 31_298_000 picoseconds. - Weight::from_parts(32_570_000, 0) + // Minimum execution time: 24_497_000 picoseconds. 
+ Weight::from_parts(25_280_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -169,10 +180,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `201` + // Measured: `167` // Estimated: `4365` - // Minimum execution time: 15_674_000 picoseconds. - Weight::from_parts(16_190_000, 0) + // Minimum execution time: 11_374_000 picoseconds. + Weight::from_parts(11_817_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -181,15 +192,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `83866` - // Minimum execution time: 38_927_000 picoseconds. - Weight::from_parts(40_545_000, 0) + // Minimum execution time: 31_805_000 picoseconds. + Weight::from_parts(32_622_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -197,15 +210,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `449` // Estimated: `83866` - // Minimum execution time: 80_209_000 picoseconds. - Weight::from_parts(82_084_000, 0) + // Minimum execution time: 62_364_000 picoseconds. 
+ Weight::from_parts(63_798_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) @@ -213,10 +228,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `140` // Estimated: `4277` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(10_088_000, 0) + // Minimum execution time: 8_811_000 picoseconds. + Weight::from_parts(9_224_000, 0) .saturating_add(Weight::from_parts(0, 4277)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -231,10 +246,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `2376` + // Measured: `2341` // Estimated: `42428` - // Minimum execution time: 93_893_000 picoseconds. - Weight::from_parts(101_065_000, 0) + // Minimum execution time: 83_292_000 picoseconds. + Weight::from_parts(89_114_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -249,10 +264,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `2362` + // Measured: `2327` // Estimated: `42428` - // Minimum execution time: 98_811_000 picoseconds. - Weight::from_parts(103_590_000, 0) + // Minimum execution time: 84_648_000 picoseconds. + Weight::from_parts(89_332_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -263,10 +278,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `1841` + // Measured: `1807` // Estimated: `4365` - // Minimum execution time: 43_230_000 picoseconds. - Weight::from_parts(46_120_000, 0) + // Minimum execution time: 40_529_000 picoseconds. + Weight::from_parts(45_217_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -277,10 +292,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1774` // Estimated: `4365` - // Minimum execution time: 43_092_000 picoseconds. - Weight::from_parts(46_018_000, 0) + // Minimum execution time: 40_894_000 picoseconds. 
+ Weight::from_parts(45_726_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -293,10 +308,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1824` + // Measured: `1790` // Estimated: `4365` - // Minimum execution time: 49_697_000 picoseconds. - Weight::from_parts(53_795_000, 0) + // Minimum execution time: 48_187_000 picoseconds. + Weight::from_parts(52_655_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -309,10 +324,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1865` + // Measured: `1831` // Estimated: `4365` - // Minimum execution time: 50_417_000 picoseconds. - Weight::from_parts(53_214_000, 0) + // Minimum execution time: 47_548_000 picoseconds. + Weight::from_parts(51_547_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -323,10 +338,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `300` // Estimated: `42428` - // Minimum execution time: 25_688_000 picoseconds. - Weight::from_parts(26_575_000, 0) + // Minimum execution time: 20_959_000 picoseconds. + Weight::from_parts(21_837_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -337,10 +352,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `42428` - // Minimum execution time: 26_230_000 picoseconds. - Weight::from_parts(27_235_000, 0) + // Minimum execution time: 21_628_000 picoseconds. + Weight::from_parts(22_192_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,10 +364,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `4365` - // Minimum execution time: 17_585_000 picoseconds. - Weight::from_parts(18_225_000, 0) + // Minimum execution time: 12_309_000 picoseconds. 
+ Weight::from_parts(12_644_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,10 +382,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `584` + // Measured: `549` // Estimated: `42428` - // Minimum execution time: 38_243_000 picoseconds. - Weight::from_parts(39_959_000, 0) + // Minimum execution time: 31_871_000 picoseconds. + Weight::from_parts(33_123_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -385,10 +400,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `719` + // Measured: `684` // Estimated: `42428` - // Minimum execution time: 88_424_000 picoseconds. - Weight::from_parts(92_969_000, 0) + // Minimum execution time: 73_715_000 picoseconds. + Weight::from_parts(79_980_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -401,10 +416,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 138_207_000 picoseconds. - Weight::from_parts(151_726_000, 0) + // Minimum execution time: 128_564_000 picoseconds. + Weight::from_parts(138_536_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -417,10 +432,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `755` + // Measured: `720` // Estimated: `42428` - // Minimum execution time: 131_001_000 picoseconds. - Weight::from_parts(148_651_000, 0) + // Minimum execution time: 129_775_000 picoseconds. + Weight::from_parts(139_001_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -433,10 +448,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 109_612_000 picoseconds. - Weight::from_parts(143_626_000, 0) + // Minimum execution time: 128_233_000 picoseconds. 
+ Weight::from_parts(135_796_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -449,10 +464,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `42428` - // Minimum execution time: 71_754_000 picoseconds. - Weight::from_parts(77_329_000, 0) + // Minimum execution time: 66_995_000 picoseconds. + Weight::from_parts(72_678_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -467,10 +482,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `83866` - // Minimum execution time: 153_244_000 picoseconds. - Weight::from_parts(169_961_000, 0) + // Minimum execution time: 137_764_000 picoseconds. + Weight::from_parts(152_260_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -483,10 +498,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `772` + // Measured: `737` // Estimated: `42428` - // Minimum execution time: 137_997_000 picoseconds. - Weight::from_parts(157_862_000, 0) + // Minimum execution time: 119_992_000 picoseconds. + Weight::from_parts(134_805_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -495,16 +510,18 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `458` + // Measured: `424` // Estimated: `4365` - // Minimum execution time: 21_794_000 picoseconds. - Weight::from_parts(22_341_000, 0) + // Minimum execution time: 20_927_000 picoseconds. 
+ Weight::from_parts(21_802_000, 0) .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) @@ -513,10 +530,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `285` // Estimated: `4365` - // Minimum execution time: 18_458_000 picoseconds. - Weight::from_parts(19_097_000, 0) + // Minimum execution time: 14_253_000 picoseconds. + Weight::from_parts(15_031_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs index b7cc5df28b9..c35925198f9 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,10 +58,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `324` + // Measured: `185` // Estimated: `42428` - // Minimum execution time: 39_852_000 picoseconds. - Weight::from_parts(41_610_000, 0) + // Minimum execution time: 28_612_000 picoseconds. 
+ Weight::from_parts(30_060_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -69,15 +70,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 52_588_000 picoseconds. - Weight::from_parts(54_154_000, 0) + // Minimum execution time: 42_827_000 picoseconds. + Weight::from_parts(44_072_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -87,15 +90,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3334` + // Measured: `3225` // Estimated: `42428` - // Minimum execution time: 70_483_000 picoseconds. - Weight::from_parts(72_731_000, 0) + // Minimum execution time: 56_475_000 picoseconds. + Weight::from_parts(58_888_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -105,60 +110,62 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3354` + // Measured: `3245` // Estimated: `42428` - // Minimum execution time: 68_099_000 picoseconds. - Weight::from_parts(71_560_000, 0) + // Minimum execution time: 56_542_000 picoseconds. 
+ Weight::from_parts(58_616_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 64_357_000 picoseconds. - Weight::from_parts(66_081_000, 0) + // Minimum execution time: 51_218_000 picoseconds. + Weight::from_parts(53_148_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 62_709_000 picoseconds. - Weight::from_parts(64_534_000, 0) + // Minimum execution time: 49_097_000 picoseconds. + Weight::from_parts(50_796_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `279` // Estimated: `4401` - // Minimum execution time: 31_296_000 picoseconds. - Weight::from_parts(32_221_000, 0) + // Minimum execution time: 23_720_000 picoseconds. 
+ Weight::from_parts(24_327_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,10 +174,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `407` + // Measured: `269` // Estimated: `4401` - // Minimum execution time: 31_209_000 picoseconds. - Weight::from_parts(32_168_000, 0) + // Minimum execution time: 24_089_000 picoseconds. + Weight::from_parts(24_556_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -179,15 +186,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `83866` - // Minimum execution time: 38_887_000 picoseconds. - Weight::from_parts(40_193_000, 0) + // Minimum execution time: 29_022_000 picoseconds. + Weight::from_parts(29_590_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -195,15 +204,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:1 w:0) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `726` + // Measured: `587` // Estimated: `83866` - // Minimum execution time: 106_054_000 picoseconds. - Weight::from_parts(108_318_000, 0) + // Minimum execution time: 81_920_000 picoseconds. + Weight::from_parts(84_492_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::TrackQueue` (r:1 w:0) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) @@ -211,10 +222,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `240` + // Measured: `102` // Estimated: `5477` - // Minimum execution time: 9_263_000 picoseconds. 
- Weight::from_parts(9_763_000, 0) + // Minimum execution time: 8_134_000 picoseconds. + Weight::from_parts(8_574_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -223,36 +234,32 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 50_080_000 picoseconds. - Weight::from_parts(51_858_000, 0) + // Minimum execution time: 39_932_000 picoseconds. + Weight::from_parts(42_086_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::TrackQueue` (r:1 w:1) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 53_889_000 picoseconds. - Weight::from_parts(55_959_000, 0) + // Minimum execution time: 42_727_000 picoseconds. + Weight::from_parts(44_280_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -261,10 +268,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 23_266_000 picoseconds. - Weight::from_parts(24_624_000, 0) + // Minimum execution time: 20_918_000 picoseconds. 
+ Weight::from_parts(22_180_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -275,10 +282,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 22_846_000 picoseconds. - Weight::from_parts(24_793_000, 0) + // Minimum execution time: 20_943_000 picoseconds. + Weight::from_parts(21_932_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -291,10 +298,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3081` + // Measured: `2943` // Estimated: `5477` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_940_000, 0) + // Minimum execution time: 25_197_000 picoseconds. + Weight::from_parts(26_083_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -307,10 +314,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3101` + // Measured: `2963` // Estimated: `5477` - // Minimum execution time: 28_133_000 picoseconds. - Weight::from_parts(29_638_000, 0) + // Minimum execution time: 24_969_000 picoseconds. + Weight::from_parts(26_096_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -321,10 +328,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `298` // Estimated: `42428` - // Minimum execution time: 25_710_000 picoseconds. - Weight::from_parts(26_500_000, 0) + // Minimum execution time: 18_050_000 picoseconds. + Weight::from_parts(18_790_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -335,10 +342,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 25_935_000 picoseconds. - Weight::from_parts(26_803_000, 0) + // Minimum execution time: 18_357_000 picoseconds. 
+ Weight::from_parts(18_957_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -347,10 +354,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `344` + // Measured: `206` // Estimated: `4401` - // Minimum execution time: 17_390_000 picoseconds. - Weight::from_parts(18_042_000, 0) + // Minimum execution time: 11_479_000 picoseconds. + Weight::from_parts(11_968_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -359,150 +366,136 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 35_141_000 picoseconds. - Weight::from_parts(36_318_000, 0) + // Minimum execution time: 24_471_000 picoseconds. + Weight::from_parts(25_440_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 37_815_000 picoseconds. - Weight::from_parts(39_243_000, 0) + // Minimum execution time: 26_580_000 picoseconds. 
+ Weight::from_parts(27_570_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 30_779_000 picoseconds. - Weight::from_parts(31_845_000, 0) + // Minimum execution time: 24_331_000 picoseconds. + Weight::from_parts(25_291_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `521` + // Measured: `382` // Estimated: `42428` - // Minimum execution time: 31_908_000 picoseconds. - Weight::from_parts(33_253_000, 0) + // Minimum execution time: 24_768_000 picoseconds. + Weight::from_parts(25_746_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 28_951_000 picoseconds. - Weight::from_parts(30_004_000, 0) + // Minimum execution time: 23_171_000 picoseconds. 
+ Weight::from_parts(24_161_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `42428` - // Minimum execution time: 27_750_000 picoseconds. - Weight::from_parts(28_588_000, 0) + // Minimum execution time: 22_263_000 picoseconds. + Weight::from_parts(23_062_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 43_950_000 picoseconds. - Weight::from_parts(46_164_000, 0) + // Minimum execution time: 33_710_000 picoseconds. + Weight::from_parts(34_871_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 31_050_000 picoseconds. - Weight::from_parts(32_169_000, 0) + // Minimum execution time: 24_260_000 picoseconds. 
+ Weight::from_parts(25_104_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:0 w:1) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `422` // Estimated: `4401` - // Minimum execution time: 21_193_000 picoseconds. - Weight::from_parts(22_116_000, 0) + // Minimum execution time: 19_821_000 picoseconds. + Weight::from_parts(20_641_000, 0) .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -511,10 +504,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `283` // Estimated: `4401` - // Minimum execution time: 18_065_000 picoseconds. - Weight::from_parts(18_781_000, 0) + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(14_070_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs index 0f36dbd384d..5f6b41d2b54 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_scheduler` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_scheduler // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_scheduler -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `68` // Estimated: `1489` - // Minimum execution time: 2_869_000 picoseconds. - Weight::from_parts(3_109_000, 0) + // Minimum execution time: 3_114_000 picoseconds. + Weight::from_parts(3_245_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -67,11 +69,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 3_326_000 picoseconds. - Weight::from_parts(5_818_563, 0) + // Minimum execution time: 3_430_000 picoseconds. + Weight::from_parts(6_250_920, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_261 - .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into())) + // Standard Error: 1_350 + .saturating_add(Weight::from_parts(333_245, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -79,8 +81,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_007_000 picoseconds. - Weight::from_parts(3_197_000, 0) + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(3_295_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) @@ -94,11 +96,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `251 + s * (1 ±0)` // Estimated: `3716 + s * (1 ±0)` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(16_869_000, 0) + // Minimum execution time: 17_072_000 picoseconds. + Weight::from_parts(17_393_000, 0) .saturating_add(Weight::from_parts(0, 3716)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -109,8 +111,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_320_000 picoseconds. - Weight::from_parts(4_594_000, 0) + // Minimum execution time: 4_566_000 picoseconds. + Weight::from_parts(4_775_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,24 +120,24 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_956_000 picoseconds. 
- Weight::from_parts(3_216_000, 0) + // Minimum execution time: 3_180_000 picoseconds. + Weight::from_parts(3_339_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_929_000, 0) + // Minimum execution time: 1_656_000 picoseconds. + Weight::from_parts(1_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_749_000 picoseconds. - Weight::from_parts(1_916_000, 0) + // Minimum execution time: 1_628_000 picoseconds. + Weight::from_parts(1_840_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) @@ -145,16 +147,18 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 9_086_000 picoseconds. - Weight::from_parts(11_733_696, 0) + // Minimum execution time: 9_523_000 picoseconds. + Weight::from_parts(12_482_434, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into())) + // Standard Error: 1_663 + .saturating_add(Weight::from_parts(370_122, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. @@ -162,13 +166,13 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 12_716_000 picoseconds. - Weight::from_parts(12_529_180, 0) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(14_705_132, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 867 - .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into())) + // Standard Error: 1_126 + .saturating_add(Weight::from_parts(547_438, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -179,11 +183,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 12_053_000 picoseconds. - Weight::from_parts(15_358_056, 0) + // Minimum execution time: 12_335_000 picoseconds. 
+ Weight::from_parts(16_144_217, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 3_176 - .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into())) + // Standard Error: 3_533 + .saturating_add(Weight::from_parts(413_823, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -191,49 +195,48 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `318 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_805_714, 0) + // Minimum execution time: 16_906_000 picoseconds. + Weight::from_parts(17_846_662, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_597 - .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into())) + // Standard Error: 2_687 + .saturating_add(Weight::from_parts(613_356, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `155` // Estimated: `42428` - // Minimum execution time: 13_156_000 picoseconds. - Weight::from_parts(13_801_287, 0) + // Minimum execution time: 8_988_000 picoseconds. + Weight::from_parts(9_527_838, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 568 - .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) + // Standard Error: 523 + .saturating_add(Weight::from_parts(25_453, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. 
fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8965` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 23_337_000 picoseconds. + Weight::from_parts(24_255_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -244,13 +247,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9643` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 30_704_000 picoseconds. + Weight::from_parts(31_646_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -259,13 +261,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8977` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 22_279_000 picoseconds. + Weight::from_parts(23_106_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -276,13 +277,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9655` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 29_649_000 picoseconds. + Weight::from_parts(30_472_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 694174954fc..ecc31dc3fa9 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_sudo` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_sudo // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(8_757_000, 0) + // Minimum execution time: 8_336_000 picoseconds. + Weight::from_parts(8_569_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_167_000 picoseconds. - Weight::from_parts(9_397_000, 0) + // Minimum execution time: 8_858_000 picoseconds. + Weight::from_parts(9_238_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -77,8 +79,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(9_573_000, 0) + // Minimum execution time: 8_921_000 picoseconds. + Weight::from_parts(9_324_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -88,10 +90,21 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 7_374_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_869_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 3_146_000 picoseconds. + Weight::from_parts(3_314_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs index 1bb2e227ab7..7d79621b9e6 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_timestamp` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_timestamp // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,26 +50,26 @@ use core::marker::PhantomData; /// Weight functions for `pallet_timestamp`. pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `311` + // Measured: `137` // Estimated: `1493` - // Minimum execution time: 10_103_000 picoseconds. - Weight::from_parts(10_597_000, 0) + // Minimum execution time: 5_596_000 picoseconds. + Weight::from_parts(5_823_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `94` + // Measured: `57` // Estimated: `0` - // Minimum execution time: 4_718_000 picoseconds. - Weight::from_parts(4_839_000, 0) + // Minimum execution time: 2_777_000 picoseconds. + Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..44dfab289fb --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_transaction_payment +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `252` + // Estimated: `1737` + // Minimum execution time: 33_070_000 picoseconds. + Weight::from_parts(33_730_000, 0) + .saturating_add(Weight::from_parts(0, 1737)) + .saturating_add(T::DbWeight::get().reads(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs index 144e9d5b872..acf09989afc 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs @@ -16,25 +16,28 @@ //! Autogenerated weights for `pallet_treasury` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_treasury // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,176 +50,168 @@ use core::marker::PhantomData; /// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `142` // Estimated: `1887` - // Minimum execution time: 177_000_000 picoseconds. - Weight::from_parts(191_000_000, 0) + // Minimum execution time: 9_928_000 picoseconds. + Weight::from_parts(10_560_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `143` + // Measured: `243` // Estimated: `1489` - // Minimum execution time: 354_000_000 picoseconds. - Weight::from_parts(376_000_000, 0) + // Minimum execution time: 20_714_000 picoseconds. 
+ Weight::from_parts(21_137_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `401` // Estimated: `3593` - // Minimum execution time: 547_000_000 picoseconds. - Weight::from_parts(550_000_000, 0) + // Minimum execution time: 31_665_000 picoseconds. + Weight::from_parts(32_442_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `470 + p * (8 ±0)` + // Measured: `570 + p * (8 ±0)` // Estimated: `3573` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(121_184_402, 0) + // Minimum execution time: 6_988_000 picoseconds. + Weight::from_parts(11_464_972, 0) .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 42_854 - .saturating_add(Weight::from_parts(153_112, 0).saturating_mul(p.into())) + // Standard Error: 1_722 + .saturating_add(Weight::from_parts(84_536, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `227` // Estimated: `1887` - // Minimum execution time: 80_000_000 picoseconds. - Weight::from_parts(82_000_000, 0) + // Minimum execution time: 5_386_000 picoseconds. 
+ Weight::from_parts(5_585_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:199 w:199) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:199 w:199) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `331 + p * (251 ±0)` + // Measured: `431 + p * (251 ±0)` // Estimated: `3593 + p * (5206 ±0)` - // Minimum execution time: 887_000_000 picoseconds. - Weight::from_parts(828_616_021, 0) + // Minimum execution time: 43_737_000 picoseconds. 
+ Weight::from_parts(39_883_021, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 695_351 - .saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 12_917 + .saturating_add(Weight::from_parts(31_796_205, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `4702` - // Minimum execution time: 208_000_000 picoseconds. - Weight::from_parts(222_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `215` + // Estimated: `4703` + // Minimum execution time: 16_829_000 picoseconds. 
+ Weight::from_parts(17_251_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `737` - // Estimated: `5313` - // Minimum execution time: 551_000_000 picoseconds. - Weight::from_parts(569_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `458` + // Estimated: `5318` + // Minimum execution time: 41_554_000 picoseconds. 
+ Weight::from_parts(42_451_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet Queries (r:1 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `442` - // Estimated: `5313` - // Minimum execution time: 245_000_000 picoseconds. - Weight::from_parts(281_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `306` + // Estimated: `5318` + // Minimum execution time: 22_546_000 picoseconds. + Weight::from_parts(23_151_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5313` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `278` + // Estimated: `5318` + // Minimum execution time: 12_169_000 picoseconds. + Weight::from_parts(12_484_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_utility.rs b/polkadot/runtime/rococo/src/weights/pallet_utility.rs index f50f60eaad7..6f2a374247f 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_utility.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_utility.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_utility // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,18 +55,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_738_000 picoseconds. - Weight::from_parts(2_704_821, 0) + // Minimum execution time: 4_041_000 picoseconds. + Weight::from_parts(5_685_496, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_999 - .saturating_add(Weight::from_parts(4_627_278, 0).saturating_mul(c.into())) + // Standard Error: 810 + .saturating_add(Weight::from_parts(3_177_197, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_294_000 picoseconds. - Weight::from_parts(5_467_000, 0) + // Minimum execution time: 3_667_000 picoseconds. + Weight::from_parts(3_871_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -71,18 +74,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_828_000 picoseconds. - Weight::from_parts(4_650_678, 0) + // Minimum execution time: 4_116_000 picoseconds. + Weight::from_parts(6_453_932, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_789 - .saturating_add(Weight::from_parts(4_885_004, 0).saturating_mul(c.into())) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_366_112, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020_000 picoseconds. - Weight::from_parts(9_205_000, 0) + // Minimum execution time: 5_630_000 picoseconds. + Weight::from_parts(5_956_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -90,10 +93,10 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(20_703_134, 0) + // Minimum execution time: 4_165_000 picoseconds. + Weight::from_parts(5_442_561, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_924 - .saturating_add(Weight::from_parts(4_604_529, 0).saturating_mul(c.into())) + // Standard Error: 460 + .saturating_add(Weight::from_parts(3_173_577, 0).saturating_mul(c.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs index 2596207d583..c21ab087774 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_vesting // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,143 +50,143 @@ use core::marker::PhantomData; /// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_820_000 picoseconds. - Weight::from_parts(31_640_992, 0) + // Minimum execution time: 29_288_000 picoseconds. 
+ Weight::from_parts(29_095_507, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 449 - .saturating_add(Weight::from_parts(45_254, 0).saturating_mul(l.into())) - // Standard Error: 800 - .saturating_add(Weight::from_parts(72_178, 0).saturating_mul(s.into())) + // Standard Error: 1_679 + .saturating_add(Weight::from_parts(33_164, 0).saturating_mul(l.into())) + // Standard Error: 2_988 + .saturating_add(Weight::from_parts(67_092, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_054_000 picoseconds. - Weight::from_parts(35_825_428, 0) + // Minimum execution time: 31_003_000 picoseconds. 
+ Weight::from_parts(30_528_438, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 749 - .saturating_add(Weight::from_parts(31_738, 0).saturating_mul(l.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(40_580, 0).saturating_mul(s.into())) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(35_429, 0).saturating_mul(l.into())) + // Standard Error: 2_823 + .saturating_add(Weight::from_parts(76_505, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_440_000 picoseconds. - Weight::from_parts(34_652_647, 0) + // Minimum execution time: 31_269_000 picoseconds. 
+ Weight::from_parts(30_661_898, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(41_942, 0).saturating_mul(l.into())) - // Standard Error: 920 - .saturating_add(Weight::from_parts(66_074, 0).saturating_mul(s.into())) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(39_300, 0).saturating_mul(l.into())) + // Standard Error: 2_480 + .saturating_add(Weight::from_parts(78_849, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_880_000 picoseconds. - Weight::from_parts(39_625_819, 0) + // Minimum execution time: 33_040_000 picoseconds. 
+ Weight::from_parts(32_469_674, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_032 - .saturating_add(Weight::from_parts(29_856, 0).saturating_mul(l.into())) - // Standard Error: 1_837 - .saturating_add(Weight::from_parts(6_210, 0).saturating_mul(s.into())) + // Standard Error: 1_418 + .saturating_add(Weight::from_parts(44_206, 0).saturating_mul(l.into())) + // Standard Error: 2_523 + .saturating_add(Weight::from_parts(74_224, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `451 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 68_294_000 picoseconds. - Weight::from_parts(68_313_394, 0) + // Minimum execution time: 62_032_000 picoseconds. 
+ Weight::from_parts(63_305_621, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 983 - .saturating_add(Weight::from_parts(48_156, 0).saturating_mul(l.into())) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(87_719, 0).saturating_mul(s.into())) + // Standard Error: 2_277 + .saturating_add(Weight::from_parts(42_767, 0).saturating_mul(l.into())) + // Standard Error: 4_051 + .saturating_add(Weight::from_parts(65_487, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `554 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_529_000 picoseconds. - Weight::from_parts(70_619_962, 0) + // Minimum execution time: 63_303_000 picoseconds. + Weight::from_parts(65_180_847, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 1_259 - .saturating_add(Weight::from_parts(50_685, 0).saturating_mul(l.into())) - // Standard Error: 2_241 - .saturating_add(Weight::from_parts(91_444, 0).saturating_mul(s.into())) + // Standard Error: 2_220 + .saturating_add(Weight::from_parts(28_829, 0).saturating_mul(l.into())) + // Standard Error: 3_951 + .saturating_add(Weight::from_parts(84_970, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,59 +195,70 @@ impl pallet_vesting::WeightInfo for WeightInfo { /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. 
- fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_428_000 picoseconds. - Weight::from_parts(35_604_430, 0) + // Minimum execution time: 31_440_000 picoseconds. + Weight::from_parts(30_773_053, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 504 - .saturating_add(Weight::from_parts(43_191, 0).saturating_mul(l.into())) - // Standard Error: 931 - .saturating_add(Weight::from_parts(66_795, 0).saturating_mul(s.into())) + // Standard Error: 1_474 + .saturating_add(Weight::from_parts(43_019, 0).saturating_mul(l.into())) + // Standard Error: 2_723 + .saturating_add(Weight::from_parts(73_360, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_696_000 picoseconds. - Weight::from_parts(39_741_284, 0) + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(33_201_125, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_751 + .saturating_add(Weight::from_parts(44_088, 0).saturating_mul(l.into())) + // Standard Error: 3_234 + .saturating_add(Weight::from_parts(86_228, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 35_553_000 picoseconds. + Weight::from_parts(34_974_083, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 478 - .saturating_add(Weight::from_parts(43_792, 0).saturating_mul(l.into())) - // Standard Error: 883 - .saturating_add(Weight::from_parts(66_540, 0).saturating_mul(s.into())) + // Standard Error: 1_560 + .saturating_add(Weight::from_parts(34_615, 0).saturating_mul(l.into())) + // Standard Error: 2_882 + .saturating_add(Weight::from_parts(83_419, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs index 7c307deec4c..ec67268d144 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_whitelist // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_whitelist -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,67 +52,75 @@ pub struct WeightInfo(PhantomData); impl pallet_whitelist::WeightInfo for WeightInfo { /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_452_000, 0) + // Minimum execution time: 16_686_000 picoseconds. + Weight::from_parts(17_042_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 20_247_000 picoseconds. - Weight::from_parts(20_808_000, 0) + // Minimum execution time: 18_250_000 picoseconds. 
+ Weight::from_parts(19_026_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `428 + n * (1 ±0)` // Estimated: `3892 + n * (1 ±0)` - // Minimum execution time: 32_633_000 picoseconds. - Weight::from_parts(32_855_000, 0) + // Minimum execution time: 28_741_000 picoseconds. + Weight::from_parts(29_024_000, 0) .saturating_add(Weight::from_parts(0, 3892)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_698_994, 0) + // Minimum execution time: 21_670_000 picoseconds. + Weight::from_parts(22_561_364, 0) .saturating_add(Weight::from_parts(0, 3556)) // Standard Error: 4 - .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 5544ca44658..d5cf33515e6 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,23 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_682_000, 0) + // Minimum execution time: 25_521_000 picoseconds. + Weight::from_parts(25_922_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +82,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. - Weight::from_parts(109_878_000, 0) + // Minimum execution time: 112_185_000 picoseconds. + Weight::from_parts(115_991_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -100,8 +102,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) + // Minimum execution time: 108_693_000 picoseconds. + Weight::from_parts(111_853_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -120,8 +122,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) + // Minimum execution time: 113_040_000 picoseconds. + Weight::from_parts(115_635_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -130,8 +132,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) + // Minimum execution time: 6_979_000 picoseconds. + Weight::from_parts(7_342_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -140,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 7_144_000 picoseconds. 
+ Weight::from_parts(7_297_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -171,8 +173,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) + // Minimum execution time: 31_238_000 picoseconds. + Weight::from_parts(31_955_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) + // Minimum execution time: 37_237_000 picoseconds. + Weight::from_parts(38_569_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) + // Minimum execution time: 16_048_000 picoseconds. + Weight::from_parts(16_617_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -228,8 +230,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) + // Minimum execution time: 16_073_000 picoseconds. + Weight::from_parts(16_672_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) + // Minimum execution time: 18_422_000 picoseconds. + Weight::from_parts(18_900_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -259,8 +261,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) + // Minimum execution time: 30_373_000 picoseconds. 
+ Weight::from_parts(30_972_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -271,8 +273,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_043_000 picoseconds. - Weight::from_parts(12_360_000, 0) + // Minimum execution time: 11_863_000 picoseconds. + Weight::from_parts(12_270_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -282,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_511_000 picoseconds. - Weight::from_parts(17_011_000, 0) + // Minimum execution time: 16_733_000 picoseconds. + Weight::from_parts(17_094_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -302,8 +304,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `13581` - // Minimum execution time: 39_041_000 picoseconds. - Weight::from_parts(39_883_000, 0) + // Minimum execution time: 39_236_000 picoseconds. + Weight::from_parts(40_587_000, 0) .saturating_add(Weight::from_parts(0, 13581)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -316,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_030_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 2_145_000 picoseconds. + Weight::from_parts(2_255_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,8 +330,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_615_000 picoseconds. - Weight::from_parts(23_008_000, 0) + // Minimum execution time: 22_518_000 picoseconds. + Weight::from_parts(22_926_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 00000000000..dc5e5d8ca4b --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,191 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_xcm_benchmarks::fungible
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_benchmarks::fungible::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn withdraw_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `101`
+		// Estimated: `3593`
+		// Minimum execution time: 27_223_000 picoseconds.
+		Weight::from_parts(27_947_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn transfer_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `101`
+		// Estimated: `6196`
+		// Minimum execution time: 36_502_000 picoseconds.
+		Weight::from_parts(37_023_000, 0)
+			.saturating_add(Weight::from_parts(0, 6196))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+	/// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn transfer_reserve_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `281`
+		// Estimated: `6196`
+		// Minimum execution time: 85_152_000 picoseconds.
+		Weight::from_parts(86_442_000, 0)
+			.saturating_add(Weight::from_parts(0, 6196))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `Benchmark::Override` (r:0 w:0)
+	/// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn reserve_asset_deposited() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+ Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 56_571_000 picoseconds. + Weight::from_parts(58_163_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 27_411_000 picoseconds. + Weight::from_parts(27_953_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 20_776_000 picoseconds. + Weight::from_parts(21_145_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 51_738_000 picoseconds. 
+ Weight::from_parts(53_251_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 39_333_000 picoseconds. + Weight::from_parts(40_515_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 00000000000..b62f36172ba --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,347 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm_benchmarks::generic +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::generic`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_benchmarks::generic::WeightInfo for WeightInfo<T> {
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+	/// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn report_holding() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `281`
+		// Estimated: `3746`
+		// Minimum execution time: 55_210_000 picoseconds.
+		Weight::from_parts(56_613_000, 0)
+			.saturating_add(Weight::from_parts(0, 3746))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	fn buy_execution() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_246_000 picoseconds.
+		Weight::from_parts(1_339_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	/// Storage: `XcmPallet::Queries` (r:1 w:0)
+	/// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn query_response() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `3465`
+		// Minimum execution time: 5_377_000 picoseconds.
+		Weight::from_parts(5_549_000, 0)
+			.saturating_add(Weight::from_parts(0, 3465))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	fn transact() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 7_008_000 picoseconds.
+		Weight::from_parts(7_361_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn refund_surplus() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_700_000 picoseconds.
+		Weight::from_parts(1_848_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn set_error_handler() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_198_000 picoseconds.
+		Weight::from_parts(1_265_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn set_appendix() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_197_000 picoseconds.
+		Weight::from_parts(1_267_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn clear_error() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_193_000 picoseconds.
+		Weight::from_parts(1_258_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn descend_origin() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_268_000 picoseconds.
+		Weight::from_parts(1_342_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn clear_origin() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 1_173_000 picoseconds.
+ Weight::from_parts(1_248_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 53_715_000 picoseconds. + Weight::from_parts(54_860_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 8_621_000 picoseconds. + Weight::from_parts(8_903_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_211_000 picoseconds. + Weight::from_parts(1_281_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 26_448_000 picoseconds. + Weight::from_parts(27_057_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_498_000 picoseconds. 
+ Weight::from_parts(3_614_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_575_000 picoseconds. + Weight::from_parts(1_698_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_334_000 picoseconds. + Weight::from_parts(1_435_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_337_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_331_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_407_000 picoseconds. + Weight::from_parts(1_522_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_963_000 picoseconds. + Weight::from_parts(64_556_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_458_000 picoseconds. 
+ Weight::from_parts(8_741_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 54_068_000 picoseconds. + Weight::from_parts(55_665_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_348_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_189_000 picoseconds. + Weight::from_parts(1_268_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_276_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_253_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_250_000 picoseconds. + Weight::from_parts(1_354_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs index a6beeded428..f41a5d4ebf8 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_common::assigned_slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::assigned_slots // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_common::assigned_slots -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,7 +50,7 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::assigned_slots`. pub struct WeightInfo(PhantomData); impl runtime_common::assigned_slots::WeightInfo for WeightInfo { - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -68,15 +70,15 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_perm_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 84_646_000 picoseconds. - Weight::from_parts(91_791_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 71_337_000 picoseconds. + Weight::from_parts(80_807_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -98,13 +100,13 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_temp_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 68_091_000 picoseconds. - Weight::from_parts(77_310_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 60_188_000 picoseconds. 
+ Weight::from_parts(63_932_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) @@ -118,11 +120,11 @@ impl runtime_common::assigned_slots::WeightInfo for Wei /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unassign_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `823` - // Estimated: `4288` - // Minimum execution time: 38_081_000 picoseconds. - Weight::from_parts(40_987_000, 0) - .saturating_add(Weight::from_parts(0, 4288)) + // Measured: `856` + // Estimated: `4321` + // Minimum execution time: 35_764_000 picoseconds. + Weight::from_parts(38_355_000, 0) + .saturating_add(Weight::from_parts(0, 4321)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -132,8 +134,8 @@ impl runtime_common::assigned_slots::WeightInfo for Wei // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_182_000 picoseconds. - Weight::from_parts(7_437_000, 0) + // Minimum execution time: 4_634_000 picoseconds. + Weight::from_parts(4_852_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -143,8 +145,8 @@ impl runtime_common::assigned_slots::WeightInfo for Wei // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_456_000, 0) + // Minimum execution time: 4_563_000 picoseconds. + Weight::from_parts(4_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs index 3cd7c7a47e9..2b756802289 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::auctions` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::auctions // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_auctions.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,92 +50,90 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::auctions`. pub struct WeightInfo(PhantomData); impl runtime_common::auctions::WeightInfo for WeightInfo { - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:1) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:1) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn new_auction() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1493` - // Minimum execution time: 12_805_000 picoseconds. - Weight::from_parts(13_153_000, 0) + // Minimum execution time: 7_307_000 picoseconds. + Weight::from_parts(7_680_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:2 w:2) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// 
Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:2 w:2) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn bid() -> Weight { // Proof Size summary in bytes: - // Measured: `728` + // Measured: `761` // Estimated: `6060` - // Minimum execution time: 77_380_000 picoseconds. - Weight::from_parts(80_503_000, 0) + // Minimum execution time: 75_448_000 picoseconds. + Weight::from_parts(78_716_000, 0) .saturating_add(Weight::from_parts(0, 6060)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe NextRandomness (r:1 w:0) - /// Proof: Babe NextRandomness (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Babe EpochStart (r:1 w:0) - /// Proof: Babe EpochStart (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::NextRandomness` (r:1 w:0) + /// Proof: `Babe::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochStart` (r:1 w:0) + /// Proof: `Babe::EpochStart` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + 
/// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `6947789` + // Measured: `6947017` // Estimated: `15822990` - // Minimum execution time: 6_311_055_000 picoseconds. - Weight::from_parts(6_409_142_000, 0) + // Minimum execution time: 7_120_207_000 picoseconds. + Weight::from_parts(7_273_496_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) - .saturating_add(T::DbWeight::get().reads(3683)) - .saturating_add(T::DbWeight::get().writes(3678)) + .saturating_add(T::DbWeight::get().reads(3682)) + .saturating_add(T::DbWeight::get().writes(3677)) } - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:0 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:0 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn cancel_auction() -> Weight { // Proof Size summary in bytes: // Measured: `177732` // Estimated: `15822990` - // Minimum execution time: 4_849_561_000 picoseconds. - Weight::from_parts(4_955_226_000, 0) + // Minimum execution time: 5_536_281_000 picoseconds. + Weight::from_parts(5_675_163_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) .saturating_add(T::DbWeight::get().reads(3673)) .saturating_add(T::DbWeight::get().writes(3673)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs index 52e0dd24afa..de2bb71933b 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_claims.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::claims` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::claims // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_claims.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_claims.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,120 +50,133 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::claims`. pub struct WeightInfo(PhantomData); impl runtime_common::claims::WeightInfo for WeightInfo { - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: 
`Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 144_931_000 picoseconds. - Weight::from_parts(156_550_000, 0) + // Minimum execution time: 181_028_000 picoseconds. + Weight::from_parts(194_590_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:0 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:0 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:0 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:0 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:0 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:0 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) fn mint_claim() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `1701` - // Minimum execution time: 11_300_000 picoseconds. - Weight::from_parts(11_642_000, 0) + // Minimum execution time: 11_224_000 picoseconds. 
+ Weight::from_parts(13_342_000, 0) .saturating_add(Weight::from_parts(0, 1701)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim_attest() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 149_112_000 picoseconds. - Weight::from_parts(153_872_000, 0) + // Minimum execution time: 187_964_000 picoseconds. 
+ Weight::from_parts(202_553_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn attest() -> Weight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4764` - // Minimum execution time: 69_619_000 picoseconds. - Weight::from_parts(79_242_000, 0) + // Minimum execution time: 78_210_000 picoseconds. 
+ Weight::from_parts(84_581_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Claims Claims (r:1 w:2) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:2) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:2) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Claims` (r:1 w:2) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:2) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:2) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) fn move_claim() -> Weight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `3905` - // Minimum execution time: 22_066_000 picoseconds. - Weight::from_parts(22_483_000, 0) + // Minimum execution time: 33_940_000 picoseconds. + Weight::from_parts(48_438_000, 0) .saturating_add(Weight::from_parts(0, 3905)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(7)) } + /// Storage: `Claims::Preclaims` (r:1 w:0) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:0) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn prevalidate_attests() -> Weight { + // Proof Size summary in bytes: + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 9_025_000 picoseconds. + Weight::from_parts(10_563_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) + .saturating_add(T::DbWeight::get().reads(2)) + } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs new file mode 100644 index 00000000000..d068f07e759 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_common::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::coretime +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::coretime`. +pub struct WeightInfo(PhantomData); +impl runtime_common::coretime::WeightInfo for WeightInfo { + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_core_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_543_000 picoseconds. + Weight::from_parts(7_745_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_367_000 picoseconds. + Weight::from_parts(9_932_305, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 231 + .saturating_add(Weight::from_parts(12_947, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs index 0e7420cba2e..8ebab3d551e 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::crowdloan` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::crowdloan // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_crowdloan.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,168 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::crowdloan`. pub struct WeightInfo(PhantomData); impl runtime_common::crowdloan::WeightInfo for WeightInfo { - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NextFundIndex (r:1 w:1) - /// Proof Skipped: Crowdloan NextFundIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NextFundIndex` (r:1 w:1) + /// Proof: `Crowdloan::NextFundIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `438` // Estimated: `3903` - // Minimum execution time: 50_399_000 picoseconds. - Weight::from_parts(51_641_000, 0) + // Minimum execution time: 46_095_000 picoseconds. 
+ Weight::from_parts(48_111_000, 0) .saturating_add(Weight::from_parts(0, 3903)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:0) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:0) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn contribute() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `3995` - // Minimum execution time: 128_898_000 picoseconds. - Weight::from_parts(130_277_000, 0) - .saturating_add(Weight::from_parts(0, 3995)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `563` + // Estimated: `4028` + // Minimum execution time: 133_059_000 picoseconds. 
+ Weight::from_parts(136_515_000, 0) + .saturating_add(Weight::from_parts(0, 4028)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) - /// Proof Skipped: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) fn withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `689` + // Measured: `687` // Estimated: `6196` - // Minimum execution time: 69_543_000 picoseconds. - Weight::from_parts(71_522_000, 0) + // Minimum execution time: 71_733_000 picoseconds. + Weight::from_parts(74_034_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `k` is `[0, 1000]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + k * (189 ±0)` - // Estimated: `140 + k * (189 ±0)` - // Minimum execution time: 50_735_000 picoseconds. - Weight::from_parts(52_282_000, 0) - .saturating_add(Weight::from_parts(0, 140)) - // Standard Error: 21_607 - .saturating_add(Weight::from_parts(38_955_985, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `125 + k * (189 ±0)` + // Estimated: `138 + k * (189 ±0)` + // Minimum execution time: 46_016_000 picoseconds. 
+ Weight::from_parts(48_260_000, 0) + .saturating_add(Weight::from_parts(0, 138)) + // Standard Error: 21_140 + .saturating_add(Weight::from_parts(39_141_925, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(k.into())) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `515` + // Measured: `514` // Estimated: `6196` - // Minimum execution time: 43_100_000 picoseconds. - Weight::from_parts(44_272_000, 0) + // Minimum execution time: 44_724_000 picoseconds. + Weight::from_parts(47_931_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) fn edit() -> Weight { // Proof Size summary in bytes: - // Measured: `235` - // Estimated: `3700` - // Minimum execution time: 18_702_000 picoseconds. - Weight::from_parts(19_408_000, 0) - .saturating_add(Weight::from_parts(0, 3700)) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 19_512_000 picoseconds. + Weight::from_parts(21_129_000, 0) + .saturating_add(Weight::from_parts(0, 3699)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn add_memo() -> Weight { // Proof Size summary in bytes: // Measured: `412` // Estimated: `3877` - // Minimum execution time: 25_568_000 picoseconds. - Weight::from_parts(26_203_000, 0) + // Minimum execution time: 33_529_000 picoseconds. 
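
An aside on the regenerated `refund(k)` weight in the crowdloan hunk above: it is a linear formula in the number of contributions, i.e. a base weight plus a per-contribution component for ref_time, proof size, and DB access. Below is a minimal standalone sketch of how the new constants compose into a single `Weight`; it is not part of the patch, the helper name is illustrative, and `RocksDbWeight` merely stands in for the runtime's `T::DbWeight`.

use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// Illustrative re-statement of the regenerated `refund(k)` formula.
fn refund_weight_for(k: u64) -> Weight {
    Weight::from_parts(48_260_000, 138)
        // ~39_141_925 ps of ref_time and 189 proof-size bytes per contribution
        .saturating_add(Weight::from_parts(39_141_925, 189).saturating_mul(k))
        // 3 base reads / 2 base writes, plus 2 reads and 2 writes per contribution
        .saturating_add(RocksDbWeight::get().reads(3 + 2 * k))
        .saturating_add(RocksDbWeight::get().writes(2 + 2 * k))
}

For k = 100 this works out to roughly 3.96 ms of ref_time plus 203 reads and 202 writes, consistent with the `[0, 1000]` range the benchmark measures.
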
+ Weight::from_parts(37_082_000, 0) .saturating_add(Weight::from_parts(0, 3877)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn poke() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `3704` - // Minimum execution time: 17_832_000 picoseconds. - Weight::from_parts(18_769_000, 0) - .saturating_add(Weight::from_parts(0, 3704)) + // Measured: `238` + // Estimated: `3703` + // Minimum execution time: 23_153_000 picoseconds. + Weight::from_parts(24_181_000, 0) + .saturating_add(Weight::from_parts(0, 3703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:1) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:100 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Paras ParaLifecycles (r:100 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:100 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:100 w:100) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:100 w:100) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:1) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:100 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:100 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:100 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:100 w:100) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:100 w:100) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[2, 100]`. fn on_initialize(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `197 + n * (356 ±0)` + // Measured: `229 + n * (356 ±0)` // Estimated: `5385 + n * (2832 ±0)` - // Minimum execution time: 128_319_000 picoseconds. - Weight::from_parts(130_877_000, 0) + // Minimum execution time: 120_164_000 picoseconds. + Weight::from_parts(3_390_119, 0) .saturating_add(Weight::from_parts(0, 5385)) - // Standard Error: 61_381 - .saturating_add(Weight::from_parts(60_209_202, 0).saturating_mul(n.into())) + // Standard Error: 41_727 + .saturating_add(Weight::from_parts(54_453_016, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs index cec357453b6..9b0cb98e6c0 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -1,36 +1,43 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Polkadot. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . //! Autogenerated weights for `runtime_common::identity_migrator` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev -// --steps=2 -// --repeat=1 +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::identity_migrator // --extrinsic=* -// --output=./migrator-release.rs +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -44,7 +51,7 @@ use core::marker::PhantomData; pub struct WeightInfo(PhantomData); impl runtime_common::identity_migrator::WeightInfo for WeightInfo { /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) @@ -63,34 +70,34 @@ impl runtime_common::identity_migrator::WeightInfo for /// The range of component `s` is `[0, 100]`. fn reap_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` - // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` - // Minimum execution time: 163_756_000 picoseconds. - Weight::from_parts(158_982_500, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_143_629 - .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) - // Standard Error: 228_725 - .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + // Measured: `7457 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037 + r * (7 ±0) + s * (32 ±0)` + // Minimum execution time: 157_343_000 picoseconds. 
+ Weight::from_parts(159_289_236, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 16_439 + .saturating_add(Weight::from_parts(224_293, 0).saturating_mul(r.into())) + // Standard Error: 3_367 + .saturating_add(Weight::from_parts(1_383_637, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) fn poke_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `7229` - // Estimated: `11003` - // Minimum execution time: 137_570_000 picoseconds. - Weight::from_parts(137_570_000, 0) - .saturating_add(Weight::from_parts(0, 11003)) + // Measured: `7242` + // Estimated: `11037` + // Minimum execution time: 114_384_000 picoseconds. + Weight::from_parts(115_741_000, 0) + .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs index 0a56562a1a9..e066106e134 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::paras_registrar` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::paras_registrar // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_paras_registrar.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,175 +50,169 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::paras_registrar`. pub struct WeightInfo(PhantomData); impl runtime_common::paras_registrar::WeightInfo for WeightInfo { - /// Storage: Registrar NextFreeParaId (r:1 w:1) - /// Proof Skipped: Registrar NextFreeParaId (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::NextFreeParaId` (r:1 w:1) + /// Proof: `Registrar::NextFreeParaId` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve() -> Weight { // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 29_948_000 picoseconds. - Weight::from_parts(30_433_000, 0) - .saturating_add(Weight::from_parts(0, 3562)) + // Measured: `96` + // Estimated: `3561` + // Minimum execution time: 24_109_000 picoseconds. 
+ Weight::from_parts(24_922_000, 0) + .saturating_add(Weight::from_parts(0, 3561)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `616` - // Estimated: `4081` - // Minimum execution time: 6_332_113_000 picoseconds. - Weight::from_parts(6_407_158_000, 0) - .saturating_add(Weight::from_parts(0, 4081)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 7_207_580_000 picoseconds. 
+ Weight::from_parts(7_298_567_000, 0) + .saturating_add(Weight::from_parts(0, 3817)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_register() -> Weight { // Proof Size summary in bytes: - // Measured: `533` - // Estimated: `3998` - // Minimum execution time: 6_245_403_000 picoseconds. - Weight::from_parts(6_289_575_000, 0) - .saturating_add(Weight::from_parts(0, 3998)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 7_196_460_000 picoseconds. 
+ Weight::from_parts(7_385_729_000, 0) + .saturating_add(Weight::from_parts(0, 3734)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: Registrar PendingSwap (r:0 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `Registrar::PendingSwap` (r:0 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `476` - // Estimated: `3941` - // Minimum execution time: 49_822_000 picoseconds. - Weight::from_parts(50_604_000, 0) - .saturating_add(Weight::from_parts(0, 3941)) + // Measured: `499` + // Estimated: `3964` + // Minimum execution time: 54_761_000 picoseconds. 
+ Weight::from_parts(57_931_000, 0) + .saturating_add(Weight::from_parts(0, 3964)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Registrar Paras (r:1 w:0) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:2 w:2) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar PendingSwap (r:1 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:2 w:2) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:2 w:2) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::PendingSwap` (r:1 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:2 w:2) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) fn swap() -> Weight { // Proof Size summary in bytes: - // Measured: `780` - // Estimated: `6720` - // Minimum execution time: 55_166_000 picoseconds. - Weight::from_parts(56_913_000, 0) - .saturating_add(Weight::from_parts(0, 6720)) + // Measured: `837` + // Estimated: `6777` + // Minimum execution time: 59_564_000 picoseconds. 
+ Weight::from_parts(62_910_000, 0) + .saturating_add(Weight::from_parts(0, 6777)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[1, 3145728]`. + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[9, 3145728]`. fn schedule_code_upgrade(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `464` - // Estimated: `3929` - // Minimum execution time: 43_650_000 picoseconds. 
- Weight::from_parts(43_918_000, 0) - .saturating_add(Weight::from_parts(0, 3929)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_041, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `201` + // Estimated: `3666` + // Minimum execution time: 33_106_000 picoseconds. + Weight::from_parts(33_526_000, 0) + .saturating_add(Weight::from_parts(0, 3666)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1048576]`. fn set_current_head(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_666_000 picoseconds. - Weight::from_parts(8_893_000, 0) + // Minimum execution time: 5_992_000 picoseconds. + Weight::from_parts(12_059_689, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(855, 0).saturating_mul(b.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(959, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs index 23ab1ed3ee0..dd10dbbf1f1 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_common_slots.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::slots // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_slots.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,86 +50,82 @@ use core::marker::PhantomData; /// Weight functions for `runtime_common::slots`. 
pub struct WeightInfo(PhantomData); impl runtime_common::slots::WeightInfo for WeightInfo { - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_lease() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `3752` - // Minimum execution time: 29_932_000 picoseconds. - Weight::from_parts(30_334_000, 0) - .saturating_add(Weight::from_parts(0, 3752)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 26_570_000 picoseconds. + Weight::from_parts(27_619_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Slots Leases (r:101 w:100) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:200 w:200) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:100 w:100) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Parachains` (r:1 w:0) + /// Proof: `Paras::Parachains` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:101 w:100) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:200 w:200) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 100]`. /// The range of component `t` is `[0, 100]`. fn manage_lease_period_start(c: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `26 + c * (47 ±0) + t * (308 ±0)` - // Estimated: `2800 + c * (2526 ±0) + t * (2789 ±0)` - // Minimum execution time: 634_547_000 picoseconds. - Weight::from_parts(643_045_000, 0) - .saturating_add(Weight::from_parts(0, 2800)) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(2_705_219, 0).saturating_mul(c.into())) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(11_464_132, 0).saturating_mul(t.into())) + // Measured: `594 + c * (20 ±0) + t * (234 ±0)` + // Estimated: `4065 + c * (2496 ±0) + t * (2709 ±0)` + // Minimum execution time: 729_793_000 picoseconds. 
+ Weight::from_parts(740_820_000, 0) + .saturating_add(Weight::from_parts(0, 4065)) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(2_793_142, 0).saturating_mul(c.into())) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(8_933_065, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2526).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2789).saturating_mul(t.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2709).saturating_mul(t.into())) } - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn clear_all_leases() -> Weight { // Proof Size summary in bytes: - // Measured: `2759` + // Measured: `2792` // Estimated: `21814` - // Minimum execution time: 129_756_000 picoseconds. - Weight::from_parts(131_810_000, 0) + // Minimum execution time: 123_888_000 picoseconds. 
+ Weight::from_parts(131_245_000, 0) .saturating_add(Weight::from_parts(0, 21814)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(9)) } - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn trigger_onboard() -> Weight { // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `4172` - // Minimum execution time: 29_527_000 picoseconds. - Weight::from_parts(30_055_000, 0) - .saturating_add(Weight::from_parts(0, 4172)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `612` + // Estimated: `4077` + // Minimum execution time: 27_341_000 picoseconds. + Weight::from_parts(28_697_000, 0) + .saturating_add(Weight::from_parts(0, 4077)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b4..653e1009f31 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::assigner_on_demand // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_parachains::assigner_on_demand -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,13 +59,13 @@ impl runtime_parachains::assigner_on_demand::WeightInfo /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. - Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `363 + s * (4 ±0)` + // Estimated: `3828 + s * (4 ±0)` + // Minimum execution time: 25_298_000 picoseconds. + Weight::from_parts(21_486_098, 0) + .saturating_add(Weight::from_parts(0, 3828)) + // Standard Error: 136 + .saturating_add(Weight::from_parts(13_943, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) @@ -77,13 +79,13 @@ impl runtime_parachains::assigner_on_demand::WeightInfo /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. - Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `363 + s * (4 ±0)` + // Estimated: `3828 + s * (4 ±0)` + // Minimum execution time: 25_421_000 picoseconds. + Weight::from_parts(21_828_043, 0) + .saturating_add(Weight::from_parts(0, 3828)) + // Standard Error: 133 + .saturating_add(Weight::from_parts(13_831, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index ca0575cb1b6..caad090ae15 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::configuration // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=runtime_parachains::configuration -// --chain=rococo-dev // --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +60,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_789_000 picoseconds. - Weight::from_parts(8_269_000, 0) + // Minimum execution time: 7_689_000 picoseconds. + Weight::from_parts(8_089_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +76,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_851_000 picoseconds. - Weight::from_parts(8_152_000, 0) + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_150_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_960_000 picoseconds. - Weight::from_parts(8_276_000, 0) + // Minimum execution time: 7_902_000 picoseconds. + Weight::from_parts(8_196_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +118,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_164_000, 0) + // Minimum execution time: 7_634_000 picoseconds. + Weight::from_parts(7_983_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +134,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_782_000 picoseconds. - Weight::from_parts(10_373_000, 0) + // Minimum execution time: 9_580_000 picoseconds. + Weight::from_parts(9_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +150,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_870_000 picoseconds. - Weight::from_parts(8_274_000, 0) + // Minimum execution time: 7_787_000 picoseconds. 
+ Weight::from_parts(8_008_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +166,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_960_000 picoseconds. - Weight::from_parts(10_514_000, 0) + // Minimum execution time: 9_557_000 picoseconds. + Weight::from_parts(9_994_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +182,8 @@ impl runtime_parachains::configuration::WeightInfo for // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_913_000 picoseconds. - Weight::from_parts(8_338_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(7_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs index 63a8c3addc7..cf1aa36e10e 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::disputes` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::disputes // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_disputes.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,14 +50,14 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::disputes`. pub struct WeightInfo(PhantomData); impl runtime_parachains::disputes::WeightInfo for WeightInfo { - /// Storage: ParasDisputes Frozen (r:0 w:1) - /// Proof Skipped: ParasDisputes Frozen (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ParasDisputes::Frozen` (r:0 w:1) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_unfreeze() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_937_000 picoseconds. - Weight::from_parts(3_082_000, 0) + // Minimum execution time: 1_855_000 picoseconds. 
+ Weight::from_parts(2_015_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 417820e6627..a3b912491e5 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::hrmp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::hrmp // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_hrmp.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,105 +50,97 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::hrmp`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::hrmp::WeightInfo for WeightInfo { - /// Storage: Paras ParaLifecycles (r:2 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_init_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `6644` - // Minimum execution time: 41_564_000 picoseconds. - Weight::from_parts(42_048_000, 0) - .saturating_add(Weight::from_parts(0, 6644)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `488` + // Estimated: `3953` + // Minimum execution time: 34_911_000 picoseconds. 
+ Weight::from_parts(35_762_000, 0) + .saturating_add(Weight::from_parts(0, 3953)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_accept_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `4401` - // Minimum execution time: 43_570_000 picoseconds. - Weight::from_parts(44_089_000, 0) - .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `478` + // Estimated: `3943` + // Minimum execution time: 31_483_000 picoseconds. 
+ Weight::from_parts(32_230_000, 0) + .saturating_add(Weight::from_parts(0, 3943)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_close_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `807` - // Estimated: `4272` - // Minimum execution time: 36_594_000 picoseconds. - Weight::from_parts(37_090_000, 0) - .saturating_add(Weight::from_parts(0, 4272)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `591` + // Estimated: `4056` + // Minimum execution time: 32_153_000 picoseconds. 
+ Weight::from_parts(32_982_000, 0) + .saturating_add(Weight::from_parts(0, 4056)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:254 w:254) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:254) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:254 w:254) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 127]`. /// The range of component `e` is `[0, 127]`. fn force_clean_hrmp(i: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + e * (100 ±0) + i * (100 ±0)` - // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)` - // Minimum execution time: 1_085_140_000 picoseconds. - Weight::from_parts(1_100_901_000, 0) - .saturating_add(Weight::from_parts(0, 3726)) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_229_112, 0).saturating_mul(i.into())) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_210_944, 0).saturating_mul(e.into())) + // Measured: `297 + e * (100 ±0) + i * (100 ±0)` + // Estimated: `3759 + e * (2575 ±0) + i * (2575 ±0)` + // Minimum execution time: 1_240_769_000 picoseconds. 
+ Weight::from_parts(1_249_285_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + // Standard Error: 112_346 + .saturating_add(Weight::from_parts(3_449_114, 0).saturating_mul(i.into())) + // Standard Error: 112_346 + .saturating_add(Weight::from_parts(3_569_184, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into()))) @@ -155,139 +150,139 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into())) .saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into())) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:256 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:256 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. 
fn force_process_hrmp_open(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + c * (136 ±0)` - // Estimated: `2234 + c * (5086 ±0)` - // Minimum execution time: 10_497_000 picoseconds. - Weight::from_parts(6_987_455, 0) - .saturating_add(Weight::from_parts(0, 2234)) - // Standard Error: 18_540 - .saturating_add(Weight::from_parts(18_788_534, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `525 + c * (136 ±0)` + // Estimated: `1980 + c * (5086 ±0)` + // Minimum execution time: 6_026_000 picoseconds. + Weight::from_parts(6_257_000, 0) + .saturating_add(Weight::from_parts(0, 1980)) + // Standard Error: 9_732 + .saturating_add(Weight::from_parts(21_049_890, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:128 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:128 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn force_process_hrmp_close(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `335 + c * (124 ±0)` - // Estimated: `1795 + c * (2600 ±0)` - // Minimum execution time: 6_575_000 picoseconds. - Weight::from_parts(1_228_642, 0) - .saturating_add(Weight::from_parts(0, 1795)) - // Standard Error: 14_826 - .saturating_add(Weight::from_parts(11_604_038, 0).saturating_mul(c.into())) + // Measured: `368 + c * (124 ±0)` + // Estimated: `1828 + c * (2600 ±0)` + // Minimum execution time: 4_991_000 picoseconds. 
+ Weight::from_parts(984_758, 0) + .saturating_add(Weight::from_parts(0, 1828)) + // Standard Error: 11_918 + .saturating_add(Weight::from_parts(13_018_813, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn hrmp_cancel_open_request(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1026 + c * (13 ±0)` - // Estimated: `4295 + c * (15 ±0)` - // Minimum execution time: 22_301_000 picoseconds. - Weight::from_parts(26_131_473, 0) - .saturating_add(Weight::from_parts(0, 4295)) - // Standard Error: 830 - .saturating_add(Weight::from_parts(49_448, 0).saturating_mul(c.into())) + // Measured: `1059 + c * (13 ±0)` + // Estimated: `4328 + c * (15 ±0)` + // Minimum execution time: 17_299_000 picoseconds. + Weight::from_parts(27_621_478, 0) + .saturating_add(Weight::from_parts(0, 4328)) + // Standard Error: 2_527 + .saturating_add(Weight::from_parts(121_149, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn clean_open_channel_requests(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `243 + c * (63 ±0)` - // Estimated: `1722 + c * (2538 ±0)` - // Minimum execution time: 5_234_000 picoseconds. 
- Weight::from_parts(7_350_270, 0) - .saturating_add(Weight::from_parts(0, 1722)) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(2_981_935, 0).saturating_mul(c.into())) + // Measured: `276 + c * (63 ±0)` + // Estimated: `1755 + c * (2538 ±0)` + // Minimum execution time: 3_764_000 picoseconds. + Weight::from_parts(5_935_301, 0) + .saturating_add(Weight::from_parts(0, 1755)) + // Standard Error: 3_761 + .saturating_add(Weight::from_parts(3_290_277, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into())) } - /// Storage: Paras ParaLifecycles (r:2 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:2 w:2) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - fn force_open_hrmp_channel(_c: u32, ) -> Weight { + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) 
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 1]`. + fn force_open_hrmp_channel(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `6644` - // Minimum execution time: 55_611_000 picoseconds. - Weight::from_parts(56_488_000, 0) - .saturating_add(Weight::from_parts(0, 6644)) - .saturating_add(T::DbWeight::get().reads(14)) + // Measured: `488 + c * (235 ±0)` + // Estimated: `6428 + c * (235 ±0)` + // Minimum execution time: 49_506_000 picoseconds. + Weight::from_parts(51_253_075, 0) + .saturating_add(Weight::from_parts(0, 6428)) + // Standard Error: 144_082 + .saturating_add(Weight::from_parts(12_862_224, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(8)) + .saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into())) } /// Storage: `Paras::ParaLifecycles` (r:1 w:0) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -311,11 +306,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) fn establish_system_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `417` - // Estimated: `6357` - // Minimum execution time: 629_674_000 picoseconds. - Weight::from_parts(640_174_000, 0) - .saturating_add(Weight::from_parts(0, 6357)) + // Measured: `488` + // Estimated: `6428` + // Minimum execution time: 50_016_000 picoseconds. + Weight::from_parts(50_933_000, 0) + .saturating_add(Weight::from_parts(0, 6428)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(8)) } @@ -323,11 +318,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_channel_deposits() -> Weight { // Proof Size summary in bytes: - // Measured: `263` - // Estimated: `3728` - // Minimum execution time: 173_371_000 picoseconds. - Weight::from_parts(175_860_000, 0) - .saturating_add(Weight::from_parts(0, 3728)) + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 12_280_000 picoseconds. + Weight::from_parts(12_863_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs index a121ad774ce..b9ec7565bd5 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::inclusion` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::inclusion // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_inclusion.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_inclusion.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,27 +50,25 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::inclusion`. pub struct WeightInfo(PhantomData); impl runtime_parachains::inclusion::WeightInfo for WeightInfo { - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:999) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:999) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) /// The range of component `i` is `[1, 1000]`. fn receive_upward_messages(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33280` + // Measured: `32993` // Estimated: `36283` - // Minimum execution time: 71_094_000 picoseconds. - Weight::from_parts(71_436_000, 0) + // Minimum execution time: 72_675_000 picoseconds. 
+ Weight::from_parts(73_290_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - // Standard Error: 22_149 - .saturating_add(Weight::from_parts(51_495_472, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 16_067 + .saturating_add(Weight::from_parts(57_735_739, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs index 5c627507dfb..e8c554610c9 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::initializer` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::initializer // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_initializer.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,18 +50,18 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::initializer`. pub struct WeightInfo(PhantomData); impl runtime_parachains::initializer::WeightInfo for WeightInfo { - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `d` is `[0, 65536]`. fn force_approve(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (11 ±0)` // Estimated: `1480 + d * (11 ±0)` - // Minimum execution time: 3_771_000 picoseconds. - Weight::from_parts(6_491_437, 0) + // Minimum execution time: 2_634_000 picoseconds. 
+ Weight::from_parts(2_728_000, 0) .saturating_add(Weight::from_parts(0, 1480)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_356, 0).saturating_mul(d.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(2_499, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 11).saturating_mul(d.into())) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs index dfd95006dc7..af26bfc9ae9 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,247 +50,248 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::paras::WeightInfo for WeightInfo { - /// Storage: Paras CurrentCodeHash (r:1 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PastCodeMeta (r:1 w:1) - /// Proof Skipped: Paras PastCodeMeta (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PastCodePruning (r:1 w:1) - /// Proof Skipped: Paras PastCodePruning (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PastCodeHash (r:0 w:1) - /// Proof Skipped: Paras PastCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeMeta` (r:1 w:1) + /// Proof: `Paras::PastCodeMeta` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodePruning` (r:1 w:1) + /// Proof: `Paras::PastCodePruning` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeHash` (r:0 w:1) + /// Proof: `Paras::PastCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_set_current_code(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `8309` // Estimated: `11774` - // Minimum execution time: 31_941_000 picoseconds. - Weight::from_parts(32_139_000, 0) + // Minimum execution time: 27_488_000 picoseconds. + Weight::from_parts(27_810_000, 0) .saturating_add(Weight::from_parts(0, 11774)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_011, 0).saturating_mul(c.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(2_189, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_set_current_head(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_275_000 picoseconds. - Weight::from_parts(8_321_000, 0) + // Minimum execution time: 5_793_000 picoseconds. 
+ Weight::from_parts(7_987_606, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Paras Heads (r:0 w:1) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_set_most_recent_context() -> Weight { - Weight::from_parts(10_155_000, 0) - // Standard Error: 0 - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_733_000 picoseconds. + Weight::from_parts(2_954_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_schedule_code_upgrade(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8715` - // Estimated: `12180` - // Minimum execution time: 49_923_000 picoseconds. - Weight::from_parts(50_688_000, 0) - .saturating_add(Weight::from_parts(0, 12180)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_976, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `8452` + // Estimated: `11917` + // Minimum execution time: 6_072_000 picoseconds. + Weight::from_parts(6_128_000, 0) + .saturating_add(Weight::from_parts(0, 11917)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_note_new_head(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `3560` - // Minimum execution time: 14_408_000 picoseconds. - Weight::from_parts(14_647_000, 0) - .saturating_add(Weight::from_parts(0, 3560)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `163` + // Estimated: `3628` + // Minimum execution time: 15_166_000 picoseconds. 
+ Weight::from_parts(21_398_053, 0) + .saturating_add(Weight::from_parts(0, 3628)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_queue_action() -> Weight { // Proof Size summary in bytes: - // Measured: `4288` - // Estimated: `7753` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(20_518_000, 0) - .saturating_add(Weight::from_parts(0, 7753)) + // Measured: `4312` + // Estimated: `7777` + // Minimum execution time: 16_345_000 picoseconds. + Weight::from_parts(16_712_000, 0) + .saturating_add(Weight::from_parts(0, 7777)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn add_trusted_validation_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `946` - // Estimated: `4411` - // Minimum execution time: 80_626_000 picoseconds. - Weight::from_parts(52_721_755, 0) - .saturating_add(Weight::from_parts(0, 4411)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_443, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `683` + // Estimated: `4148` + // Minimum execution time: 78_076_000 picoseconds. 
+ Weight::from_parts(123_193_814, 0) + .saturating_add(Weight::from_parts(0, 4148)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_770, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Paras CodeByHashRefs (r:1 w:0) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:0) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_unused_validation_code() -> Weight { // Proof Size summary in bytes: // Measured: `28` // Estimated: `3493` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 5_184_000 picoseconds. + Weight::from_parts(5_430_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 87_994_000 picoseconds. - Weight::from_parts(89_933_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 102_995_000 picoseconds. 
+ Weight::from_parts(108_977_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras UpcomingUpgrades (r:1 w:1) - /// Proof Skipped: Paras UpcomingUpgrades (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:0 w:100) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingUpgrades` (r:1 w:1) + /// Proof: `Paras::UpcomingUpgrades` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:0 w:100) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `27523` - // Estimated: `30988` - // Minimum execution time: 783_222_000 picoseconds. - Weight::from_parts(794_959_000, 0) - .saturating_add(Weight::from_parts(0, 30988)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `27360` + // Estimated: `30825` + // Minimum execution time: 709_433_000 picoseconds. 
+ Weight::from_parts(725_074_000, 0) + .saturating_add(Weight::from_parts(0, 30825)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(104)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `27214` - // Estimated: `30679` - // Minimum execution time: 87_424_000 picoseconds. - Weight::from_parts(88_737_000, 0) - .saturating_add(Weight::from_parts(0, 30679)) + // Measured: `27338` + // Estimated: `30803` + // Minimum execution time: 98_973_000 picoseconds. + Weight::from_parts(104_715_000, 0) + .saturating_add(Weight::from_parts(0, 30803)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `26991` - // Estimated: `30456` - // Minimum execution 
time: 612_485_000 picoseconds. - Weight::from_parts(621_670_000, 0) - .saturating_add(Weight::from_parts(0, 30456)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `26728` + // Estimated: `30193` + // Minimum execution time: 550_958_000 picoseconds. + Weight::from_parts(564_497_000, 0) + .saturating_add(Weight::from_parts(0, 30193)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 86_673_000 picoseconds. - Weight::from_parts(87_424_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 97_088_000 picoseconds. + Weight::from_parts(103_617_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs index a102d1903b2..374927f8470 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs @@ -13,161 +13,334 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . + //! Autogenerated weights for `runtime_parachains::paras_inherent` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-11-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// ./target/production/polkadot // benchmark +// pallet // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras_inherent // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras_inherent.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; +use core::marker::PhantomData; /// Weight functions for `runtime_parachains::paras_inherent`. pub struct WeightInfo(PhantomData); impl runtime_parachains::paras_inherent::WeightInfo for WeightInfo { - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParaSessionInfo Sessions (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:1) - // Storage: ParasDisputes Included (r:1 w:1) - // Storage: ParasDisputes SpamSlots (r:1 w:1) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: 
`ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// The range of component `v` is `[10, 200]`. fn enter_variable_disputes(v: u32, ) -> Weight { - Weight::from_parts(352_590_000 as u64, 0) - // Standard Error: 13_000 - .saturating_add(Weight::from_parts(49_254_000 as u64, 0).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(24 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + // Proof Size summary in bytes: + // Measured: `67819` + // Estimated: `73759 + v * (23 ±0)` + // Minimum execution time: 874_229_000 picoseconds. + Weight::from_parts(486_359_072, 0) + .saturating_add(Weight::from_parts(0, 73759)) + // Standard Error: 19_197 + .saturating_add(Weight::from_parts(41_842_161, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:1 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInclusion AvailabilityBitfields (r:0 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::AvailabilityBitfields` (r:0 w:1) + /// Proof: `ParaInclusion::AvailabilityBitfields` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { - Weight::from_parts(299_878_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(21 as u64)) - .saturating_add(T::DbWeight::get().writes(15 as u64)) + // Proof Size summary in bytes: + // Measured: `42791` + // Estimated: `48731` + // Minimum execution time: 428_757_000 picoseconds. 
+ Weight::from_parts(449_681_000, 0) + .saturating_add(Weight::from_parts(0, 48731)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(17)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) - fn enter_backed_candidates_variable(_v: u32) -> Weight { - Weight::from_parts(442_472_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[101, 200]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `42863` + // Estimated: `48803` + // Minimum execution time: 1_276_079_000 picoseconds. 
+ Weight::from_parts(1_313_585_212, 0) + .saturating_add(Weight::from_parts(0, 48803)) + // Standard Error: 18_279 + .saturating_add(Weight::from_parts(43_528, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(27)) + .saturating_add(T::DbWeight::get().writes(16)) } - // Storage: ParaInherent Included (r:1 w:1) - // Storage: System ParentHash (r:1 w:0) - // Storage: ParaScheduler AvailabilityCores (r:1 w:1) - // Storage: ParasShared CurrentSessionIndex (r:1 w:0) - // Storage: Configuration ActiveConfig (r:1 w:0) - // Storage: ParasDisputes Frozen (r:1 w:0) - // Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - // Storage: Paras Parachains (r:1 w:0) - // Storage: ParaInclusion PendingAvailability (r:2 w:1) - // Storage: ParaInclusion PendingAvailabilityCommitments (r:1 w:1) - // Storage: Dmp DownwardMessageQueues (r:1 w:1) - // Storage: Hrmp HrmpChannelDigests (r:1 w:1) - // Storage: Paras FutureCodeUpgrades (r:1 w:0) - // Storage: ParasDisputes Disputes (r:2 w:0) - // Storage: ParaScheduler SessionStartBlock (r:1 w:0) - // Storage: ParaScheduler ParathreadQueue (r:1 w:1) - // Storage: ParaScheduler Scheduled (r:1 w:1) - // Storage: ParaScheduler ValidatorGroups (r:1 w:0) - // Storage: Paras PastCodeMeta (r:1 w:0) - // Storage: Paras CurrentCodeHash (r:1 w:0) - // Storage: Ump RelayDispatchQueueSize (r:1 w:0) - // Storage: Ump NeedsDispatch (r:1 w:1) - // Storage: Ump NextDispatchRoundStartWith (r:1 w:1) - // Storage: ParaInherent OnChainVotes (r:0 w:1) - // Storage: ParasDisputes Included (r:0 w:1) - // Storage: Hrmp HrmpWatermarks (r:0 w:1) - // Storage: Paras Heads (r:0 w:1) + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) + /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailability` (r:2 w:1) + /// Proof: `ParaInclusion::PendingAvailability` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::PendingAvailabilityCommitments` (r:1 w:1) + /// Proof: `ParaInclusion::PendingAvailabilityCommitments` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` 
(`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { - Weight::from_parts(36_903_411_000 as u64, 0) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(14 as u64)) + // Proof Size summary in bytes: + // Measured: `42876` + // 
Estimated: `48816` + // Minimum execution time: 34_352_245_000 picoseconds. + Weight::from_parts(34_587_559_000, 0) + .saturating_add(Weight::from_parts(0, 48816)) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(16)) } } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 9753a409304..bdf68864605 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -155,6 +155,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 6c899c52701..a0617b3108f 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -238,6 +238,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = frame_support::weights::ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = (); } parameter_types! { @@ -393,7 +394,7 @@ where let current_block = System::block_number().saturated_into::().saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -405,16 +406,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = Indices::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -442,12 +444,32 @@ parameter_types! { pub Prefix: &'static [u8] = b"Pay KSMs to the Kusama account:"; } +#[cfg(feature = "runtime-benchmarks")] +pub struct ClaimsHelper; + +#[cfg(feature = "runtime-benchmarks")] +use frame_support::dispatch::DispatchInfo; + +#[cfg(feature = "runtime-benchmarks")] +impl claims::BenchmarkHelperTrait for ClaimsHelper { + fn default_call_and_info() -> (RuntimeCall, DispatchInfo) { + use frame_support::dispatch::GetDispatchInfo; + let call = RuntimeCall::Claims(claims::Call::attest { + statement: claims::StatementKind::Regular.to_text().to_vec(), + }); + let info = call.get_dispatch_info(); + (call, info) + } +} + impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; type MoveClaimOrigin = frame_system::EnsureRoot; type WeightInfo = claims::TestWeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = ClaimsHelper; } parameter_types! { @@ -728,8 +750,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. 
-pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -741,7 +763,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -752,7 +774,7 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub type Hash = ::Hash; pub type Extrinsic = ::Extrinsic; diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 4180828bcfb..6899edeeaeb 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -270,6 +270,7 @@ runtime-benchmarks = [ "pallet-state-trie-migration/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b55d68ee0f6..45bbd0260e5 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -200,6 +200,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -385,6 +386,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -820,7 +822,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -832,16 +834,17 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + Some((call, (address, signature, tx_ext))) } } @@ -1548,8 +1551,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1713,7 +1716,7 @@ pub mod migrations { /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1724,7 +1727,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; #[cfg(feature = "runtime-benchmarks")] mod benches { @@ -1770,7 +1773,9 @@ mod benches { [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2312,6 +2317,7 @@ sp_api::impl_runtime_apis! { use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; @@ -2340,6 +2346,7 @@ sp_api::impl_runtime_apis! { use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs new file mode 100644 index 00000000000..e4e1a799ec6 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-20, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=frame-system-extensions +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_356_000 picoseconds. + Weight::from_parts(7_805_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(10_009_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 761_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_396_000 picoseconds. + Weight::from_parts(7_585_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. + Weight::from_parts(4_168_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 541_000 picoseconds. + Weight::from_parts(4_228_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_807_000 picoseconds. 
+ Weight::from_parts(8_025_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index f6a9008d718..8aeb4216550 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -17,6 +17,7 @@ pub mod frame_election_provider_support; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_bags_list; pub mod pallet_balances; @@ -37,6 +38,7 @@ pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index e9ab3ad37a4..649c43e031d 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -94,4 +94,15 @@ impl pallet_sudo::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 2_875_000 picoseconds. + Weight::from_parts(6_803_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 00000000000..001a09f103d --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet_transaction_payment +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 30_888_000 picoseconds. + Weight::from_parts(41_308_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(3)) + } +} diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 10726b0f511..660a30605e5 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -47,6 +47,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-salary/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 9892c500f2e..3bd2fa4b4f5 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -25,14 +25,22 @@ use frame_support::{ traits::{ConstU32, Everything}, }; use frame_system::{EnsureRoot, EnsureSigned}; -use polkadot_test_runtime::SignedExtra; use primitives::{AccountIndex, BlakeTwo256, Signature}; use sp_runtime::{generic, traits::MaybeEquivalence, AccountId32, BuildStorage}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +pub type TxExtension = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, +); pub type Address = sp_runtime::MultiAddress; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Header = generic::Header; pub type Block = generic::Block; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index a20390b64f9..f953e54111c 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -46,13 +46,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type 
SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 5bf65fa9f9a..a11e535492c 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -45,13 +45,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/prdoc/pr_2280.prdoc b/prdoc/pr_2280.prdoc new file mode 100644 index 00000000000..3026dc254e6 --- /dev/null +++ b/prdoc/pr_2280.prdoc @@ -0,0 +1,144 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: FRAME Create `TransactionExtension` as a replacement for `SignedExtension` + +doc: + - audience: Runtime User + description: | + Introduces a new trait `TransactionExtension` to replace `SignedExtension`. Introduce the + idea of transactions which obey the runtime's extensions and have according Extension data + (né Extra data) yet do not have hard-coded signatures. + + Deprecate the terminology of "Unsigned" when used for transactions/extrinsics owing to there + now being "proper" unsigned transactions which obey the extension framework and "old-style" + unsigned which do not. Instead we have `General` for the former and `Bare` for the latter. + Unsigned will be phased out as a type of transaction, and `Bare` will only be used for + Inherents. + + Types of extrinsic are now therefore + - Bare (no hardcoded signature, no Extra data; used to be known as "Unsigned") + - Bare transactions (deprecated) - Gossiped, validated with `ValidateUnsigned` + (deprecated) and the `_bare_compat` bits of `TransactionExtension` (deprecated). + - Inherents - Not gossiped, validated with `ProvideInherent`. + - Extended (Extra data) - Gossiped, validated via `TransactionExtension`. + - Signed transactions (with a hardcoded signature). + - General transactions (without a hardcoded signature). + + Notable information on `TransactionExtension` and the differences from `SignedExtension` + - `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`. It is encoded + for the entire transaction and passed in to each extension as a new argument to validate. + - `pre_dispatch` is renamed to `prepare`. + - `validate` runs transaction validation logic both off-chain and on-chain, and is + non-mutating. + - `prepare` runs on-chain pre-execution logic using information extracted during validation + and is mutating. + - `validate` and `prepare` are now passed an `Origin` rather than an `AccountId`. If the + extension logic presumes an `AccountId`, consider using the trait function + `AsSystemOriginSigner::as_system_origin_signer`. 
+ - A signature on the underlying transaction may validly not be present. + - The origin may be altered during validation. + - Validation functionality present in `validate` should not be repeated in `prepare`. + Useful information obtained during `validate` should now be passed in to `prepare` using + the new user-specifiable type `Val`. + - Unsigned logic should be migrated from the old `*_unsigned` functions into the regular + versions of the new functions where the `Origin` is `None`. + - The `Call` type defining the runtime call is now a type parameter. + - `TransactionExtension` now takes a `Context` type parameter. This defines some arbitrary + contextual data that is injected into the transaction extension logic. It is unused in + instances migrated from `SignedExtension`. + - Extensions now track the weight they consume during validation, preparation and + post-dispatch through the `TransactionExtensionBase::weight` function. + - `TestXt` was removed and its usage in tests was replaced with `UncheckedExtrinsic` + instances. + + To fix the build issues introduced by this change, use the `AsTransactionExtension` adapter + to wrap existing `SignedExtension`s by converting them using the `From` + generic implementation for `AsTransactionExtension`. More details on migrating existing + `SignedExtension` implementations to `TransactionExtension` in the PR description. + +crates: + - name: bridge-runtime-common + - name: bp-bridge-hub-cumulus + - name: bp-kusama + - name: bp-polkadot-bulletin + - name: bp-polkadot + - name: bp-rococo + - name: bp-westend + - name: bp-polkadot-core + - name: bp-runtime + - name: snowbridge-pallet-inbound-queue + - name: snowbridge-pallet-outbound-queue + - name: snowbridge-pallet-system + - name: snowbridge-runtime-test-common + - name: parachain-template-runtime + - name: asset-hub-rococo-runtime + - name: asset-hub-westend-runtime + - name: bridge-hub-rococo-runtime + - name: bridge-hub-westend-runtime + - name: collectives-westend-runtime + - name: contracts-rococo-runtime + - name: coretime-rococo-runtime + - name: coretime-westend-runtime + - name: glutton-westend-runtime + - name: people-rococo-runtime + - name: people-westend-runtime + - name: seedling-runtime + - name: shell-runtime + - name: penpal-runtime + - name: rococo-parachain-runtime + - name: polkadot-parachain-bin + - name: cumulus-primitives-storage-weight-reclaim + - name: cumulus-test-client + - name: cumulus-test-runtime + - name: cumulus-test-service + - name: polkadot-sdk-docs + - name: polkadot-service + - name: polkadot-test-service + - name: polkadot-runtime-common + - name: rococo-runtime + - name: polkadot-test-runtime + - name: westend-runtime + - name: staging-xcm-builder + - name: minimal-runtime + - name: node-template + - name: node-template-runtime + - name: staging-node-cli + - name: kitchensink-runtime + - name: node-testing + - name: sc-client-api + - name: sc-client-db + - name: sc-network-gossip + - name: sc-network-sync + - name: sc-transaction-pool + - name: frame + - name: pallet-babe + - name: pallet-balances + - name: pallet-beefy + - name: pallet-collective + - name: pallet-election-provider-multi-phase + - name: pallet-elections-phragmen + - name: pallet-example-basic + - name: pallet-example-offchain-worker + - name: frame-executive + - name: pallet-grandpa + - name: pallet-im-online + - name: pallet-offences + - name: pallet-sassafras + - name: pallet-state-trie-migration + - name: pallet-sudo + - name: frame-support-procedural + - name: frame-support + - 
name: frame-system + - name: frame-system-benchmarking + - name: pallet-transaction-payment + - name: pallet-asset-conversion-tx-payment + - name: pallet-asset-tx-payment + - name: pallet-skip-feeless-payment + - name: sp-inherents + - name: sp-metadata-ir + - name: sp-runtime + - name: substrate-test-runtime + - name: frame-benchmarking-cli + - name: frame-remote-externalities + - name: substrate-rpc-client diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index ecd384a5145..ec9eee205ce 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -33,7 +33,7 @@ pub trait WeightInfo { /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); -{{#if (eq pallet "frame_system")}} +{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} impl WeightInfo for SubstrateWeight { {{else}} impl WeightInfo for SubstrateWeight { diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/substrate/bin/minimal/runtime/src/lib.rs index fb996aef46b..d3e8a2e8ec0 100644 --- a/substrate/bin/minimal/runtime/src/lib.rs +++ b/substrate/bin/minimal/runtime/src/lib.rs @@ -52,7 +52,7 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -type SignedExtra = ( +type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -104,7 +104,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = FixedFee<1, ::Balance>; } -type Block = frame::runtime::types_common::BlockOf; +type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; type RuntimeExecutive = diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index 9cba2b5a366..2f31947e492 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -78,6 +78,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "node-template-runtime/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/substrate/bin/node-template/node/src/benchmarking.rs b/substrate/bin/node-template/node/src/benchmarking.rs index 6e29ad1a123..eacc367669c 100644 --- a/substrate/bin/node-template/node/src/benchmarking.rs +++ b/substrate/bin/node-template/node/src/benchmarking.rs @@ -109,7 +109,7 @@ pub fn create_benchmark_extrinsic( .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -121,11 +121,12 @@ pub fn create_benchmark_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -143,7 +144,7 @@ pub fn create_benchmark_extrinsic( call, sp_runtime::AccountId32::from(sender.public()).into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/substrate/bin/node-template/runtime/Cargo.toml 
b/substrate/bin/node-template/runtime/Cargo.toml index 86bab0ca375..c7cffa568db 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -106,6 +106,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-template/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 159697f427f..ee622a691b4 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -241,6 +241,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; type FeeMultiplierUpdate = ConstFeeMultiplier; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -276,8 +277,8 @@ pub type Address = sp_runtime::MultiAddress; pub type Header = generic::Header; /// Block type as expected by this runtime. pub type Block = generic::Block; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -296,9 +297,9 @@ type Migrations = (); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -314,6 +315,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [frame_benchmarking, BaselineBench::] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] [pallet_sudo, Sudo] @@ -498,6 +500,7 @@ impl_runtime_apis! { use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; let mut list = Vec::::new(); @@ -514,6 +517,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; impl frame_system_benchmarking::Config for Runtime {} diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index abe284c41da..c91a83e6d6e 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -196,6 +196,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", "node-inspect?/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -205,6 +206,7 @@ runtime-benchmarks = [ "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 23a62cc0bd2..b98a321c338 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -110,7 +110,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { kitchensink_runtime::UncheckedExtrinsic { - signature: None, + preamble: sp_runtime::generic::Preamble::Bare, function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), } .into() diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea..e13d34f9657 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -29,7 +29,7 @@ use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; -use sp_runtime::traits::BlakeTwo256; +use sp_runtime::{generic::ExtrinsicFormat, traits::BlakeTwo256}; use sp_state_machine::TestExternalities as CoreTestExternalities; use staging_node_cli::service::RuntimeExecutor; @@ -144,11 +144,11 @@ fn test_blocks( ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { - signed: None, + format: ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), + format: ExtrinsicFormat::Signed(alice(), tx_ext(i, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 1 * DOLLARS, diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 8f2aba6b44c..157f278fb53 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -107,18 +107,21 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = + let tx_ext: kitchensink_runtime::TxExtension = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - 
frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< kitchensink_runtime::Runtime, @@ -128,15 +131,17 @@ pub fn create_extrinsic( let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( - (), - kitchensink_runtime::VERSION.spec_version, - kitchensink_runtime::VERSION.transaction_version, - genesis_hash, - best_hash, - (), - (), + ( + (), + kitchensink_runtime::VERSION.spec_version, + kitchensink_runtime::VERSION.transaction_version, + genesis_hash, + best_hash, + (), + (), + ), (), ), ); @@ -146,7 +151,7 @@ pub fn create_extrinsic( function, sp_runtime::AccountId32::from(sender.public()).into(), kitchensink_runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } @@ -791,7 +796,7 @@ mod tests { use codec::Encode; use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, + Address, BalancesCall, RuntimeCall, TxExtension, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; @@ -993,25 +998,31 @@ mod tests { let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); - let extra = ( - check_non_zero_sender, - check_spec_version, - check_tx_version, - check_genesis, - check_era, - check_nonce, - check_weight, + let tx_ext: TxExtension = ( + ( + check_non_zero_sender, + check_spec_version, + check_tx_version, + check_genesis, + check_era, + check_nonce, + check_weight, + ) + .into(), tx_payment, ); let raw_payload = SignedPayload::from_raw( function, - extra, - ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), + tx_ext, + ( + ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), ()), + (), + ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let (function, extra, _) = raw_payload.deconstruct(); + let (function, tx_ext, _) = raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) + UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), tx_ext) .into() }, ); diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 525ab2e39c1..5fcb2295ef0 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -67,7 +67,7 @@ fn transfer_fee(extrinsic: &E) -> Balance { fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(default_transfer_call()), }) } @@ -84,11 +84,11 @@ fn changes_trie_block() -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, 
function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -111,11 +111,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -131,18 +131,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((bob(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(bob(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(1, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 15 * DOLLARS, @@ -166,11 +166,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(nonce, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], @@ -677,11 +677,11 @@ fn deploying_wasm_contract_should_work() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< Runtime, > { @@ -694,7 +694,7 @@ fn deploying_wasm_contract_should_work() { }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 8c7b3c87315..9d6407067a3 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -54,11 +54,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic 
{ - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: Box::new(RuntimeCall::RootTesting( pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) }, @@ -77,11 +77,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], @@ -147,7 +147,7 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, tip))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, tip)), function: RuntimeCall::Balances(default_transfer_call()), }); @@ -211,7 +211,10 @@ fn block_weight_capacity_report() { let num_transfers = block_number * factor; let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed( + charlie(), + tx_ext(nonce + i as Nonce, 0), + ), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 0, @@ -222,7 +225,7 @@ fn block_weight_capacity_report() { xts.insert( 0, CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, ); @@ -285,13 +288,16 @@ fn block_length_capacity_report() { previous_hash, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000, }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed( + charlie(), + tx_ext(nonce, 0), + ), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0u8; (block_number * factor) as usize], }), diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 5cbb0103d47..f3a5bac8fb5 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -130,8 +130,8 @@ fn should_submit_signed_twice_from_the_same_account() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; - extra.5 + let extra = tx.preamble.to_signed().unwrap().2; + (extra.0).5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); @@ -179,8 +179,8 @@ fn should_submit_signed_twice_from_all_accounts() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; - extra.5 + let extra = tx.preamble.to_signed().unwrap().2; + (extra.0).5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = 
nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); @@ -236,7 +236,7 @@ fn submitted_transaction_should_be_valid() { let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); // add balance to the account - let author = extrinsic.signature.clone().unwrap().0; + let author = extrinsic.preamble.clone().to_signed().clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() }; diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 9607d05daf0..bdef42474d0 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -276,6 +276,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", @@ -333,6 +334,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-tx-pause/runtime-benchmarks", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 5431628c747..f4615802515 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -560,6 +560,7 @@ impl pallet_transaction_payment::Config for Runtime { MinimumMultiplier, MaximumMultiplier, >; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_asset_tx_payment::Config for Runtime { @@ -569,6 +570,9 @@ impl pallet_asset_tx_payment::Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = pallet_asset_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_asset_conversion_tx_payment::Config for Runtime { @@ -579,6 +583,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, Native, >; + type WeightInfo = pallet_asset_conversion_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } impl pallet_skip_feeless_payment::Config for Runtime { @@ -1407,29 +1414,33 @@ where // so the actual block number is `n`. 
.saturating_sub(1); let era = Era::mortal(period, current_block); - let extra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(era), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), + let tx_ext: TxExtension = ( + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(era), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( tip, None, ), ), ); - let raw_payload = SignedPayload::new(call, extra) + + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) + let (call, tx_ext, _) = raw_payload.deconstruct(); + Some((call, (address, signature, tx_ext))) } } @@ -2280,19 +2291,21 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The TransactionExtension to the basic transaction logic. /// /// When you change this, you **MUST** modify [`sign`] in `bin/node/testing/src/keyring.rs`! /// /// [`sign`]: <../../testing/src/keyring.rs.html> -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, +pub type TxExtension = ( + ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ), pallet_skip_feeless_payment::SkipCheckIfFeeless< Runtime, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, @@ -2301,11 +2314,11 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, @@ -2360,6 +2373,106 @@ mod mmr { pub type Hashing = ::Hashing; } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_conversion_tx_payment::BenchmarkHelperTrait + for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (u32, u32) { + (seed, seed) + } + + fn setup_balances_and_pool(asset_id: u32, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_id.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = Box::new(NativeOrWithId::Native); + let token_second = Box::new(NativeOrWithId::WithId(asset_id)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(seed: u32) -> (u32, u32) { + (seed, seed) + } + + fn setup_balances_and_pool(asset_id: u32, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_id.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = Box::new(NativeOrWithId::Native); + let token_second = Box::new(NativeOrWithId::WithId(asset_id)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( @@ -2380,6 +2493,9 @@ mod benches { [tasks_example, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetConversionTxPayment] + [pallet_asset_tx_payment, AssetTxPayment] + [pallet_transaction_payment, TransactionPayment] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] @@ -2413,6 +2529,7 @@ mod benches { [pallet_state_trie_migration, StateTrieMigration] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] [pallet_tips, Tips] 
[pallet_transaction_storage, TransactionStorage] @@ -2960,6 +3077,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2984,6 +3102,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index df302a6453b..ccf79eed884 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -51,6 +51,7 @@ use sp_core::{ed25519, sr25519, traits::SpawnNamed, Pair, Public}; use sp_crypto_hashing::blake2_256; use sp_inherents::InherentData; use sp_runtime::{ + generic::{ExtrinsicFormat, Preamble}, traits::{Block as BlockT, IdentifyAccount, Verify}, OpaqueExtrinsic, }; @@ -295,10 +296,10 @@ impl<'a> Iterator for BlockContentIterator<'a> { let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some(( + format: ExtrinsicFormat::Signed( sender, - signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), - )), + tx_ext(0, kitchensink_runtime::ExistentialDeposit::get() + 1), + ), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { @@ -562,11 +563,11 @@ impl BenchKeyring { tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, @@ -581,11 +582,20 @@ impl BenchKeyring { } }); UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + preamble: Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => + UncheckedExtrinsic { preamble: Preamble::Bare, function: xt.function }, + ExtrinsicFormat::General(tx_ext) => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(tx_ext), + function: xt.function, + }, } } diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index f712191bed6..13daf915325 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -19,13 +19,13 @@ //! Test accounts. 
use codec::Encode; -use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, TxExtension, UncheckedExtrinsic}; use node_cli::chain_spec::get_from_seed; use node_primitives::{AccountId, Balance, Nonce}; use sp_core::{ecdsa, ed25519, sr25519}; use sp_crypto_hashing::blake2_256; use sp_keyring::AccountKeyring; -use sp_runtime::generic::Era; +use sp_runtime::generic::{Era, ExtrinsicFormat}; /// Alice's account id. pub fn alice() -> AccountId { @@ -70,15 +70,18 @@ pub fn session_keys_from_seed(seed: &str) -> SessionKeys { } /// Returns transaction extra. -pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { +pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension { ( - frame_system::CheckNonZeroSender::new(), - frame_system::CheckSpecVersion::new(), - frame_system::CheckTxVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::mortal(256, 0)), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), + ( + frame_system::CheckNonZeroSender::new(), + frame_system::CheckSpecVersion::new(), + frame_system::CheckTxVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::mortal(256, 0)), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + ) + .into(), pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ), @@ -92,10 +95,10 @@ pub fn sign( tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = - (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + (xt.function, tx_ext.clone(), spec_version, tx_version, genesis_hash, genesis_hash); let key = AccountKeyring::from_account_id(&signed).unwrap(); let signature = payload @@ -108,10 +111,21 @@ pub fn sign( }) .into(); UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + preamble: sp_runtime::generic::Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Bare, + function: xt.function, + }, + ExtrinsicFormat::General(tx_ext) => UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(tx_ext), + function: xt.function, + }, } } diff --git a/substrate/client/api/src/notifications/tests.rs b/substrate/client/api/src/notifications/tests.rs index fba829b1cf9..7afdcbd9543 100644 --- a/substrate/client/api/src/notifications/tests.rs +++ b/substrate/client/api/src/notifications/tests.rs @@ -18,7 +18,10 @@ use super::*; -use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; +use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, H256 as Hash}, +}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -50,7 +53,7 @@ impl PartialEq for StorageChangeSet { } } -type Block = RawBlock>; +type Block = RawBlock>; #[test] fn triggering_change_should_notify_wildcard_listeners() { diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index e47559e710d..9f3b8ca77c2 100644 --- a/substrate/client/db/benches/state_access.rs +++ 
b/substrate/client/db/benches/state_access.rs @@ -22,12 +22,13 @@ use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBack use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, Header, MockCallU64}, StateVersion, Storage, }; use tempfile::TempDir; -pub(crate) type Block = RawBlock>; +pub(crate) type Block = RawBlock>; fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { let mut op = db.begin_operation().unwrap(); diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 0faa90dfc4f..f22022ef29a 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2573,7 +2573,8 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, Header, MockCallU64}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, StateVersion, }; @@ -2581,7 +2582,8 @@ pub(crate) mod tests { const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; - pub(crate) type Block = RawBlock>; + type UncheckedXt = UncheckedExtrinsic; + pub(crate) type Block = RawBlock; pub fn insert_header( backend: &Backend, @@ -2600,7 +2602,7 @@ pub(crate) mod tests { parent_hash: H256, _changes: Option, Vec)>>, extrinsics_root: H256, - body: Vec>, + body: Vec, transaction_index: Option>, ) -> Result { use sp_runtime::testing::Digest; @@ -3414,7 +3416,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3436,11 +3438,20 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[0]).unwrap()); assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } } @@ -3464,7 +3475,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3473,16 +3484,26 @@ pub(crate) mod tests { } // insert a fork at block 2 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -3492,7 +3513,10 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), 
bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); @@ -3506,16 +3530,28 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } if matches!(pruning, BlocksPruning::KeepAll) { - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); } else { assert_eq!(None, bc.body(fork_hash_root).unwrap()); } @@ -3536,8 +3572,16 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(10), 10); let make_block = |index, parent, val: u64| { - insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None) - .unwrap() + insert_block( + &backend, + index, + parent, + None, + H256::random(), + vec![UncheckedXt::new_transaction(val.into(), ())], + None, + ) + .unwrap() }; let block_0 = make_block(0, Default::default(), 0x00); @@ -3565,18 +3609,30 @@ pub(crate) mod tests { let bc = backend.blockchain(); assert_eq!(None, bc.body(block_1b).unwrap()); assert_eq!(None, bc.body(block_2b).unwrap()); - assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap()); - assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap()); - assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap()); - assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]), + bc.body(block_0).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]), + bc.body(block_1a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]), + bc.body(block_2a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]), + bc.body(block_3a).unwrap() + ); } #[test] fn indexed_data_block_body() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); let index = vec![ @@ -3597,7 +3653,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3619,8 +3678,9 @@ pub(crate) mod tests { fn index_invalid_size() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = 
UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); + let x0_hash = as sp_core::Hasher>::hash(&x0[..]); let x1_hash = as sp_core::Hasher>::hash(&x1[..]); let index = vec![ @@ -3641,7 +3701,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3655,7 +3718,7 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1 = UncheckedXt::new_transaction(0.into(), ()).encode(); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); for i in 0..10 { let mut index = Vec::new(); @@ -3675,7 +3738,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], Some(index), ) .unwrap(); @@ -3709,7 +3772,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3724,7 +3787,7 @@ pub(crate) mod tests { blocks[1], None, sp_core::H256::random(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3738,7 +3801,7 @@ pub(crate) mod tests { blocks[0], None, sp_core::H256::random(), - vec![42.into()], + vec![UncheckedXt::new_transaction(42.into(), ())], None, ) .unwrap(); @@ -4211,7 +4274,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4226,7 +4289,10 @@ pub(crate) mod tests { // Check that we can properly access values when there is reference count // but no value. - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); // Block 1 gets pinned three times backend.pin_block(blocks[1]).unwrap(); @@ -4243,27 +4309,42 @@ pub(crate) mod tests { // Block 0, 1, 2, 3 are pinned, so all values should be cached. // Block 4 is inside the pruning window, its value is in db. 
- assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(1))), bc.justifications(blocks[1]).unwrap() ); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(2))), bc.justifications(blocks[2]).unwrap() ); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(3))), bc.justifications(blocks[3]).unwrap() ); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4294,7 +4375,10 @@ pub(crate) mod tests { assert!(bc.justifications(blocks[1]).unwrap().is_none()); // Block 4 is inside the pruning window and still kept - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4302,9 +4386,16 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 -> 5 - let hash = - insert_block(&backend, 5, prev_hash, None, Default::default(), vec![5.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 5, + prev_hash, + None, + Default::default(), + vec![UncheckedXt::new_transaction(5.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); backend.pin_block(blocks[4]).unwrap(); @@ -4319,12 +4410,18 @@ pub(crate) mod tests { assert!(bc.body(blocks[2]).unwrap().is_none()); assert!(bc.body(blocks[3]).unwrap().is_none()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() ); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); backend.unpin_block(blocks[4]); @@ -4334,9 +4431,16 @@ pub(crate) mod tests { // Append a justification to block 5. 
backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap(); - let hash = - insert_block(&backend, 6, blocks[5], None, Default::default(), vec![6.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 6, + blocks[5], + None, + Default::default(), + vec![UncheckedXt::new_transaction(6.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); // Pin block 5 so it gets loaded into the cache on prune @@ -4349,7 +4453,10 @@ pub(crate) mod tests { op.mark_finalized(blocks[6], None).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); let mut expected = Justifications::from(build_justification(5)); expected.append(([0, 0, 0, 1], vec![42])); @@ -4371,7 +4478,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4387,16 +4494,26 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 // \ -> 2 -> 3 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); let fork_hash_3 = insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -4417,14 +4534,35 @@ pub(crate) mod tests { } let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); // Check the fork hashes. assert_eq!(None, bc.body(fork_hash_root).unwrap()); - assert_eq!(Some(vec![3.into(), 11.into()]), bc.body(fork_hash_3).unwrap()); + assert_eq!( + Some(vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()) + ]), + bc.body(fork_hash_3).unwrap() + ); // Unpin all blocks, except the forked one. 
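As an aside on these test changes: block bodies are now built from full mock extrinsics via new_transaction(call, extension) instead of bare ExtrinsicWrapper values. A self-contained sketch of that shape, where MockXt and MockCall are illustrative stand-ins (not the sp_runtime testing types) and a tiny helper keeps assertions as short as the old `i.into()` form:

#[derive(Debug, Clone, PartialEq, Eq)]
struct MockCall(u64);

#[derive(Debug, Clone, PartialEq, Eq)]
struct MockXt {
    call: MockCall,
    extension: (),
}

impl MockXt {
    /// Mirrors the `new_transaction(call, extension)` constructor used in the tests.
    fn new_transaction(call: MockCall, extension: ()) -> Self {
        Self { call, extension }
    }
}

/// Build a block body from raw values, so assertions stay terse.
fn body(values: &[u64]) -> Vec<MockXt> {
    values.iter().map(|v| MockXt::new_transaction(MockCall(*v), ())).collect()
}

fn main() {
    let stored = vec![
        MockXt::new_transaction(MockCall(3), ()),
        MockXt::new_transaction(MockCall(11), ()),
    ];
    // Same comparison shape as the `bc.body(fork_hash_3)` assertion in the test above.
    assert_eq!(Some(body(&[3, 11])), Some(stored));
    println!("fork body matches");
}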
for block in &blocks { diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index abf9c4629ce..d2a5f7e718a 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -582,14 +582,19 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { mod tests { use super::*; use codec::Input; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - type Block = RawBlock>; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64}, + }; + + pub type UncheckedXt = UncheckedExtrinsic; + type Block = RawBlock; #[cfg(feature = "rocksdb")] #[test] fn database_type_subdir_migration() { use std::path::PathBuf; - type Block = RawBlock>; + type Block = RawBlock; fn check_dir_for_db_type( db_type: DatabaseType, diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 069d7cdba16..f1c830341ea 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -550,7 +550,8 @@ mod tests { NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange, }; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64, H256}, traits::NumberFor, }; use std::{ @@ -559,7 +560,7 @@ mod tests { sync::{Arc, Mutex}, }; - type Block = RawBlock>; + type Block = RawBlock>; macro_rules! push_msg { ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs index 4988045a478..a115ee94767 100644 --- a/substrate/client/network/sync/src/blocks.rs +++ b/substrate/client/network/sync/src/blocks.rs @@ -265,9 +265,12 @@ mod test { use libp2p::PeerId; use sc_network_common::sync::message; use sp_core::H256; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as RawBlock, MockCallU64}, + }; - type Block = RawBlock>; + type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { bc.blocks.is_empty() && bc.peer_requests.is_empty() diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 64b301e6bf3..730cfe36712 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -659,8 +659,13 @@ where }) .unwrap_or_default() .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - + // TODO [#2415]: This isn't really what we mean - we really want a + // `tx.is_transaction`, since bare transactions may be gossipped as in the case + // of Frontier txs or claims. This will be sorted once we dispense with the + // concept of bare transactions and make inherents the only possible type of + // extrinsics which are bare. At this point we can change this to + // `tx.is_transaction()`. + .filter(|tx| !tx.is_bare()); let mut resubmitted_to_report = 0; resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { diff --git a/substrate/frame/alliance/src/weights.rs b/substrate/frame/alliance/src/weights.rs index b5bb5095720..0b2d1fca43c 100644 --- a/substrate/frame/alliance/src/weights.rs +++ b/substrate/frame/alliance/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_alliance +//! 
Autogenerated weights for `pallet_alliance` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/alliance/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/alliance/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_alliance. +/// Weight functions needed for `pallet_alliance`. pub trait WeightInfo { fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight; fn vote(m: u32, ) -> Weight; @@ -74,205 +73,209 @@ pub trait WeightInfo { fn abdicate_fellow_status() -> Weight; } -/// Weights for pallet_alliance using the Substrate node and recommended hardware. +/// Weights for `pallet_alliance` using the Substrate node and recommended hardware. 
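Before the per-function listings, a hedged sketch of the shape every regenerated entry below follows: a base Weight::from_parts(ref_time, proof_size), per-component slopes combined with saturating_add/saturating_mul, and the DB read/write weights. The numbers here are invented; only the composition pattern is taken from this file.

use frame_support::weights::{constants::RocksDbWeight, Weight};

/// `m` = number of alliance members, as in `vote(m)` below (illustrative numbers only).
pub fn example_vote_weight(m: u32) -> Weight {
    // Base weight: execution time in picoseconds, proof size in bytes.
    Weight::from_parts(30_000_000, 6676)
        // Per-member execution-time slope.
        .saturating_add(Weight::from_parts(70_000, 0).saturating_mul(m.into()))
        // Two storage reads and one write, priced by the DB weight constants.
        .saturating_add(RocksDbWeight::get().reads(2_u64))
        .saturating_add(RocksDbWeight::get().writes(1_u64))
        // Per-member proof-size slope.
        .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into()))
}

For example, example_vote_weight(50) prices a vote in a 50-member alliance; the generated SubstrateWeight functions below do the same with the benchmarked constants and T::DbWeight.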
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalCount (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Voting (r:0 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `653 + m * (32 ±0) + p * (35 ±0)` + // Measured: `654 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 36_908_000 picoseconds. - Weight::from_parts(39_040_304, 6676) - // Standard Error: 131 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into())) - // Standard Error: 1_375 - .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into())) - // Standard Error: 1_358 - .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into())) + // Minimum execution time: 30_801_000 picoseconds. 
+ Weight::from_parts(32_942_969, 6676) + // Standard Error: 112 + .saturating_add(Weight::from_parts(614, 0).saturating_mul(b.into())) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(45_758, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(136_600, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1042 + m * (64 ±0)` + // Measured: `1113 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 30_166_000 picoseconds. - Weight::from_parts(32_798_454, 6676) - // Standard Error: 1_432 - .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into())) + // Minimum execution time: 29_705_000 picoseconds. + Weight::from_parts(30_274_070, 6676) + // Standard Error: 884 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `576 + m * (96 Β±0) + p * (36 Β±0)` + // Measured: `640 + m * (96 Β±0) + p * (36 Β±0)` // Estimated: `6676 + m * (97 Β±0) + p * (36 Β±0)` - // Minimum execution time: 45_173_000 picoseconds. - Weight::from_parts(42_192_020, 6676) - // Standard Error: 1_456 - .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into())) - // Standard Error: 1_420 - .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into())) + // Minimum execution time: 38_596_000 picoseconds. + Weight::from_parts(36_445_536, 6676) + // Standard Error: 1_217 + .saturating_add(Weight::from_parts(69_976, 0).saturating_mul(m.into())) + // Standard Error: 1_187 + .saturating_add(Weight::from_parts(149_706, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:1 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1087 + m * (96 Β±0) + p * (39 Β±0)` + // Measured: `1220 + m * (96 Β±0) + p * (39 Β±0)` // Estimated: `6676 + m * (97 Β±0) + p * (40 Β±0)` - // Minimum execution time: 58_290_000 picoseconds. 
- Weight::from_parts(54_924_919, 6676) - // Standard Error: 157 - .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into())) - // Standard Error: 1_665 - .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into())) - // Standard Error: 1_623 - .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 57_602_000 picoseconds. + Weight::from_parts(55_147_214, 6676) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(b.into())) + // Standard Error: 1_346 + .saturating_add(Weight::from_parts(56_056, 0).saturating_mul(m.into())) + // Standard Error: 1_312 + .saturating_add(Weight::from_parts(168_247, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `577 + m * (96 Β±0) + p * (36 Β±0)` + // Measured: `641 + m * (96 Β±0) + p * (36 Β±0)` // Estimated: `6676 + m * (97 Β±0) + p * (36 Β±0)` - // Minimum execution time: 46_794_000 picoseconds. - Weight::from_parts(43_092_958, 6676) - // Standard Error: 1_273 - .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into())) - // Standard Error: 1_257 - .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into())) + // Minimum execution time: 40_755_000 picoseconds. 
+ Weight::from_parts(36_953_935, 6676) + // Standard Error: 1_177 + .saturating_add(Weight::from_parts(73_240, 0).saturating_mul(m.into())) + // Standard Error: 1_162 + .saturating_add(Weight::from_parts(149_412, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:1 w:0) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Voting (r:1 w:1) - /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: AllianceMotion Members (r:1 w:0) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:1 w:0) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Proposals (r:1 w:1) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion ProposalOf (r:0 w:1) - /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[5, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `684 + m * (96 Β±0) + p * (35 Β±0)` + // Measured: `694 + m * (96 Β±0) + p * (35 Β±0)` // Estimated: `6676 + m * (97 Β±0) + p * (36 Β±0)` - // Minimum execution time: 47_338_000 picoseconds. - Weight::from_parts(41_257_479, 6676) - // Standard Error: 119 - .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into())) - // Standard Error: 1_277 - .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into())) - // Standard Error: 1_231 - .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into())) + // Minimum execution time: 41_113_000 picoseconds. 
+ Weight::from_parts(36_610_116, 6676) + // Standard Error: 92 + .saturating_add(Weight::from_parts(1_157, 0).saturating_mul(b.into())) + // Standard Error: 984 + .saturating_add(Weight::from_parts(63_050, 0).saturating_mul(m.into())) + // Standard Error: 949 + .saturating_add(Weight::from_parts(150_420, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:1 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `250` // Estimated: `12362` - // Minimum execution time: 35_012_000 picoseconds. - Weight::from_parts(24_288_079, 12362) - // Standard Error: 878 - .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into())) - // Standard Error: 867 - .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into())) + // Minimum execution time: 30_249_000 picoseconds. + Weight::from_parts(21_364_868, 12362) + // Standard Error: 887 + .saturating_add(Weight::from_parts(131_624, 0).saturating_mul(m.into())) + // Standard Error: 877 + .saturating_add(Weight::from_parts(105_379, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:2 w:2) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: AllianceMotion Proposals (r:1 w:0) - /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Alliance DepositOf (r:200 w:50) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) - /// Storage: System Account (r:50 w:50) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: AllianceMotion Members (r:0 w:1) - /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: AllianceMotion Prime (r:0 w:1) - /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, 
`max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `x` is `[1, 100]`. /// The range of component `y` is `[0, 100]`. /// The range of component `z` is `[0, 50]`. @@ -280,14 +283,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + x * (50 Β±0) + y * (51 Β±0) + z * (251 Β±0)` // Estimated: `12362 + x * (2539 Β±0) + y * (2539 Β±0) + z * (2603 Β±1)` - // Minimum execution time: 309_235_000 picoseconds. - Weight::from_parts(311_279_000, 12362) - // Standard Error: 26_510 - .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into())) - // Standard Error: 26_382 - .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into())) - // Standard Error: 52_716 - .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into())) + // Minimum execution time: 307_414_000 picoseconds. + Weight::from_parts(309_960_000, 12362) + // Standard Error: 29_278 + .saturating_add(Weight::from_parts(588_774, 0).saturating_mul(x.into())) + // Standard Error: 29_137 + .saturating_add(Weight::from_parts(563_245, 0).saturating_mul(y.into())) + // Standard Error: 58_221 + .saturating_add(Weight::from_parts(13_947_604, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -298,397 +301,401 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) } - /// Storage: Alliance Rule (r:0 w:1) - /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) fn set_rule() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_833_000 picoseconds. - Weight::from_parts(9_313_000, 0) + // Minimum execution time: 6_156_000 picoseconds. + Weight::from_parts(6_560_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `10187` - // Minimum execution time: 12_231_000 picoseconds. - Weight::from_parts(12_761_000, 10187) + // Minimum execution time: 8_988_000 picoseconds. 
+ Weight::from_parts(9_476_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Announcements (r:1 w:1) - /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen) + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `352` // Estimated: `10187` - // Minimum execution time: 13_079_000 picoseconds. - Weight::from_parts(13_612_000, 10187) + // Minimum execution time: 10_126_000 picoseconds. + Weight::from_parts(10_755_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Alliance DepositOf (r:0 w:1) - /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `468` + // Measured: `501` // Estimated: `18048` - // Minimum execution time: 44_574_000 picoseconds. - Weight::from_parts(46_157_000, 18048) + // Minimum execution time: 38_878_000 picoseconds. + Weight::from_parts(40_493_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Alliance Members (r:3 w:1) - /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen) - /// Storage: Alliance UnscrupulousAccounts (r:1 w:0) - /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `400` // Estimated: `18048` - // Minimum execution time: 26_114_000 picoseconds. - Weight::from_parts(27_069_000, 18048) + // Minimum execution time: 23_265_000 picoseconds. 
+ Weight::from_parts(24_703_000, 18048)
 .saturating_add(T::DbWeight::get().reads(4_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: Alliance Members (r:2 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:2 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn elevate_ally() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `12362`
- // Minimum execution time: 25_882_000 picoseconds.
- Weight::from_parts(26_923_000, 12362)
+ // Minimum execution time: 23_049_000 picoseconds.
+ Weight::from_parts(23_875_000, 12362)
 .saturating_add(T::DbWeight::get().reads(3_u64))
 .saturating_add(T::DbWeight::get().writes(4_u64))
 }
- /// Storage: Alliance Members (r:4 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Alliance RetiringMembers (r:0 w:1)
- /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Members` (r:4 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Alliance::RetiringMembers` (r:0 w:1)
+ /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 fn give_retirement_notice() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `23734`
- // Minimum execution time: 34_112_000 picoseconds.
- Weight::from_parts(35_499_000, 23734)
+ // Minimum execution time: 29_124_000 picoseconds.
+ Weight::from_parts(30_369_000, 23734)
 .saturating_add(T::DbWeight::get().reads(5_u64))
 .saturating_add(T::DbWeight::get().writes(5_u64))
 }
- /// Storage: Alliance RetiringMembers (r:1 w:1)
- /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
- /// Storage: Alliance Members (r:1 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: Alliance DepositOf (r:1 w:1)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+ /// Storage: `Alliance::RetiringMembers` (r:1 w:1)
+ /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::Members` (r:1 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::DepositOf` (r:1 w:1)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 fn retire() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `687`
+ // Measured: `720`
 // Estimated: `6676`
- // Minimum execution time: 41_239_000 picoseconds.
- Weight::from_parts(42_764_000, 6676)
+ // Minimum execution time: 36_376_000 picoseconds.
+ Weight::from_parts(38_221_000, 6676)
 .saturating_add(T::DbWeight::get().reads(4_u64))
 .saturating_add(T::DbWeight::get().writes(4_u64))
 }
- /// Storage: Alliance Members (r:3 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Alliance DepositOf (r:1 w:1)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:3 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Alliance::DepositOf` (r:1 w:1)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn kick_member() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `707`
+ // Measured: `740`
 // Estimated: `18048`
- // Minimum execution time: 68_071_000 picoseconds.
- Weight::from_parts(71_808_000, 18048)
+ // Minimum execution time: 56_560_000 picoseconds.
+ Weight::from_parts(58_621_000, 18048)
 .saturating_add(T::DbWeight::get().reads(6_u64))
 .saturating_add(T::DbWeight::get().writes(5_u64))
 }
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:1)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousWebsites (r:1 w:1)
- /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[0, 100]`.
 /// The range of component `l` is `[0, 255]`.
 fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `246`
+ // Measured: `279`
 // Estimated: `27187`
- // Minimum execution time: 7_006_000 picoseconds.
- Weight::from_parts(7_253_000, 27187)
- // Standard Error: 3_403
- .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into()))
- // Standard Error: 1_333
- .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into()))
+ // Minimum execution time: 5_191_000 picoseconds.
+ Weight::from_parts(5_410_000, 27187)
+ // Standard Error: 3_215
+ .saturating_add(Weight::from_parts(1_018_569, 0).saturating_mul(n.into()))
+ // Standard Error: 1_259
+ .saturating_add(Weight::from_parts(68_712, 0).saturating_mul(l.into()))
 .saturating_add(T::DbWeight::get().reads(2_u64))
 .saturating_add(T::DbWeight::get().writes(2_u64))
 }
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:1)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousWebsites (r:1 w:1)
- /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[0, 100]`.
 /// The range of component `l` is `[0, 255]`.
 fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0 + l * (100 ±0) + n * (289 ±0)`
 // Estimated: `27187`
- // Minimum execution time: 7_292_000 picoseconds.
- Weight::from_parts(7_629_000, 27187)
- // Standard Error: 176_225
- .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into()))
- // Standard Error: 69_017
- .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into()))
+ // Minimum execution time: 5_361_000 picoseconds.
+ Weight::from_parts(5_494_000, 27187)
+ // Standard Error: 181_133
+ .saturating_add(Weight::from_parts(16_322_982, 0).saturating_mul(n.into()))
+ // Standard Error: 70_940
+ .saturating_add(Weight::from_parts(343_581, 0).saturating_mul(l.into()))
 .saturating_add(T::DbWeight::get().reads(2_u64))
 .saturating_add(T::DbWeight::get().writes(2_u64))
 }
- /// Storage: Alliance Members (r:3 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:3 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn abdicate_fellow_status() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `18048`
- // Minimum execution time: 31_798_000 picoseconds.
- Weight::from_parts(33_463_000, 18048)
+ // Minimum execution time: 28_856_000 picoseconds.
+ Weight::from_parts(29_875_000, 18048)
 .saturating_add(T::DbWeight::get().reads(4_u64))
 .saturating_add(T::DbWeight::get().writes(4_u64))
 }
 }
-// For backwards compatibility and tests
+// For backwards compatibility and tests.
 impl WeightInfo for () {
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion ProposalOf (r:1 w:1)
- /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Proposals (r:1 w:1)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion ProposalCount (r:1 w:1)
- /// Proof Skipped: AllianceMotion ProposalCount (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Voting (r:0 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1)
+ /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Voting` (r:0 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `b` is `[1, 1024]`.
 /// The range of component `m` is `[2, 100]`.
 /// The range of component `p` is `[1, 100]`.
 fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `653 + m * (32 ±0) + p * (35 ±0)`
+ // Measured: `654 + m * (32 ±0) + p * (36 ±0)`
 // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)`
- // Minimum execution time: 36_908_000 picoseconds.
- Weight::from_parts(39_040_304, 6676)
- // Standard Error: 131
- .saturating_add(Weight::from_parts(781, 0).saturating_mul(b.into()))
- // Standard Error: 1_375
- .saturating_add(Weight::from_parts(48_745, 0).saturating_mul(m.into()))
- // Standard Error: 1_358
- .saturating_add(Weight::from_parts(148_047, 0).saturating_mul(p.into()))
+ // Minimum execution time: 30_801_000 picoseconds.
+ Weight::from_parts(32_942_969, 6676)
+ // Standard Error: 112
+ .saturating_add(Weight::from_parts(614, 0).saturating_mul(b.into()))
+ // Standard Error: 1_177
+ .saturating_add(Weight::from_parts(45_758, 0).saturating_mul(m.into()))
+ // Standard Error: 1_162
+ .saturating_add(Weight::from_parts(136_600, 0).saturating_mul(p.into()))
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into()))
 .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
 }
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Voting (r:1 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `m` is `[5, 100]`.
 fn vote(m: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1042 + m * (64 ±0)`
+ // Measured: `1113 + m * (64 ±0)`
 // Estimated: `6676 + m * (64 ±0)`
- // Minimum execution time: 30_166_000 picoseconds.
- Weight::from_parts(32_798_454, 6676)
- // Standard Error: 1_432
- .saturating_add(Weight::from_parts(83_001, 0).saturating_mul(m.into()))
+ // Minimum execution time: 29_705_000 picoseconds.
+ Weight::from_parts(30_274_070, 6676)
+ // Standard Error: 884
+ .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(m.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into()))
 }
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Voting (r:1 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:1 w:0)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Proposals (r:1 w:1)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion ProposalOf (r:0 w:1)
- /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:1 w:0)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `m` is `[4, 100]`.
 /// The range of component `p` is `[1, 100]`.
 fn close_early_disapproved(m: u32, p: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `576 + m * (96 ±0) + p * (36 ±0)`
+ // Measured: `640 + m * (96 ±0) + p * (36 ±0)`
 // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)`
- // Minimum execution time: 45_173_000 picoseconds.
- Weight::from_parts(42_192_020, 6676)
- // Standard Error: 1_456
- .saturating_add(Weight::from_parts(66_751, 0).saturating_mul(m.into()))
- // Standard Error: 1_420
- .saturating_add(Weight::from_parts(158_161, 0).saturating_mul(p.into()))
+ // Minimum execution time: 38_596_000 picoseconds.
+ Weight::from_parts(36_445_536, 6676)
+ // Standard Error: 1_217
+ .saturating_add(Weight::from_parts(69_976, 0).saturating_mul(m.into()))
+ // Standard Error: 1_187
+ .saturating_add(Weight::from_parts(149_706, 0).saturating_mul(p.into()))
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into()))
 .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
 }
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Voting (r:1 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:1 w:0)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion ProposalOf (r:1 w:1)
- /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Proposals (r:1 w:1)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:1 w:0)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
+ /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `TxPause::PausedCalls` (r:1 w:0)
+ /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `b` is `[1, 1024]`.
 /// The range of component `m` is `[4, 100]`.
 /// The range of component `p` is `[1, 100]`.
 fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1087 + m * (96 ±0) + p * (39 ±0)`
+ // Measured: `1220 + m * (96 ±0) + p * (39 ±0)`
 // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)`
- // Minimum execution time: 58_290_000 picoseconds.
- Weight::from_parts(54_924_919, 6676)
- // Standard Error: 157
- .saturating_add(Weight::from_parts(464, 0).saturating_mul(b.into()))
- // Standard Error: 1_665
- .saturating_add(Weight::from_parts(73_183, 0).saturating_mul(m.into()))
- // Standard Error: 1_623
- .saturating_add(Weight::from_parts(168_318, 0).saturating_mul(p.into()))
- .saturating_add(RocksDbWeight::get().reads(5_u64))
+ // Minimum execution time: 57_602_000 picoseconds.
+ Weight::from_parts(55_147_214, 6676)
+ // Standard Error: 127
+ .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(b.into()))
+ // Standard Error: 1_346
+ .saturating_add(Weight::from_parts(56_056, 0).saturating_mul(m.into()))
+ // Standard Error: 1_312
+ .saturating_add(Weight::from_parts(168_247, 0).saturating_mul(p.into()))
+ .saturating_add(RocksDbWeight::get().reads(7_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into()))
 .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into()))
 }
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Voting (r:1 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:1 w:0)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:1 w:0)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Proposals (r:1 w:1)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion ProposalOf (r:0 w:1)
- /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:1 w:0)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:1 w:0)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `m` is `[2, 100]`.
 /// The range of component `p` is `[1, 100]`.
 fn close_disapproved(m: u32, p: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `577 + m * (96 ±0) + p * (36 ±0)`
+ // Measured: `641 + m * (96 ±0) + p * (36 ±0)`
 // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)`
- // Minimum execution time: 46_794_000 picoseconds.
- Weight::from_parts(43_092_958, 6676)
- // Standard Error: 1_273
- .saturating_add(Weight::from_parts(71_054, 0).saturating_mul(m.into()))
- // Standard Error: 1_257
- .saturating_add(Weight::from_parts(152_820, 0).saturating_mul(p.into()))
+ // Minimum execution time: 40_755_000 picoseconds.
+ Weight::from_parts(36_953_935, 6676)
+ // Standard Error: 1_177
+ .saturating_add(Weight::from_parts(73_240, 0).saturating_mul(m.into()))
+ // Standard Error: 1_162
+ .saturating_add(Weight::from_parts(149_412, 0).saturating_mul(p.into()))
 .saturating_add(RocksDbWeight::get().reads(5_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into()))
 .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
 }
- /// Storage: Alliance Members (r:1 w:0)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Voting (r:1 w:1)
- /// Proof Skipped: AllianceMotion Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:1 w:0)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:1 w:0)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Proposals (r:1 w:1)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion ProposalOf (r:0 w:1)
- /// Proof Skipped: AllianceMotion ProposalOf (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:1 w:0)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:1 w:0)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `b` is `[1, 1024]`.
 /// The range of component `m` is `[5, 100]`.
 /// The range of component `p` is `[1, 100]`.
 fn close_approved(b: u32, m: u32, p: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `684 + m * (96 ±0) + p * (35 ±0)`
+ // Measured: `694 + m * (96 ±0) + p * (35 ±0)`
 // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)`
- // Minimum execution time: 47_338_000 picoseconds.
- Weight::from_parts(41_257_479, 6676)
- // Standard Error: 119
- .saturating_add(Weight::from_parts(1_019, 0).saturating_mul(b.into()))
- // Standard Error: 1_277
- .saturating_add(Weight::from_parts(78_453, 0).saturating_mul(m.into()))
- // Standard Error: 1_231
- .saturating_add(Weight::from_parts(150_991, 0).saturating_mul(p.into()))
+ // Minimum execution time: 41_113_000 picoseconds.
+ Weight::from_parts(36_610_116, 6676)
+ // Standard Error: 92
+ .saturating_add(Weight::from_parts(1_157, 0).saturating_mul(b.into()))
+ // Standard Error: 984
+ .saturating_add(Weight::from_parts(63_050, 0).saturating_mul(m.into()))
+ // Standard Error: 949
+ .saturating_add(Weight::from_parts(150_420, 0).saturating_mul(p.into()))
 .saturating_add(RocksDbWeight::get().reads(5_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into()))
 .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
 }
- /// Storage: Alliance Members (r:2 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Members (r:1 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:2 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Members` (r:1 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `m` is `[1, 100]`.
 /// The range of component `z` is `[0, 100]`.
 fn init_members(m: u32, z: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `217`
+ // Measured: `250`
 // Estimated: `12362`
- // Minimum execution time: 35_012_000 picoseconds.
- Weight::from_parts(24_288_079, 12362)
- // Standard Error: 878
- .saturating_add(Weight::from_parts(153_615, 0).saturating_mul(m.into()))
- // Standard Error: 867
- .saturating_add(Weight::from_parts(129_307, 0).saturating_mul(z.into()))
+ // Minimum execution time: 30_249_000 picoseconds.
+ Weight::from_parts(21_364_868, 12362)
+ // Standard Error: 887
+ .saturating_add(Weight::from_parts(131_624, 0).saturating_mul(m.into()))
+ // Standard Error: 877
+ .saturating_add(Weight::from_parts(105_379, 0).saturating_mul(z.into()))
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 }
- /// Storage: Alliance Members (r:2 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Alliance DepositOf (r:200 w:50)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
- /// Storage: System Account (r:50 w:50)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:2 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Alliance::DepositOf` (r:200 w:50)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:50 w:50)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 /// The range of component `x` is `[1, 100]`.
 /// The range of component `y` is `[0, 100]`.
 /// The range of component `z` is `[0, 50]`.
@@ -696,14 +703,14 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)`
 // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)`
- // Minimum execution time: 309_235_000 picoseconds.
- Weight::from_parts(311_279_000, 12362)
- // Standard Error: 26_510
- .saturating_add(Weight::from_parts(543_475, 0).saturating_mul(x.into()))
- // Standard Error: 26_382
- .saturating_add(Weight::from_parts(603_169, 0).saturating_mul(y.into()))
- // Standard Error: 52_716
- .saturating_add(Weight::from_parts(16_264_836, 0).saturating_mul(z.into()))
+ // Minimum execution time: 307_414_000 picoseconds.
+ Weight::from_parts(309_960_000, 12362)
+ // Standard Error: 29_278
+ .saturating_add(Weight::from_parts(588_774, 0).saturating_mul(x.into()))
+ // Standard Error: 29_137
+ .saturating_add(Weight::from_parts(563_245, 0).saturating_mul(y.into()))
+ // Standard Error: 58_221
+ .saturating_add(Weight::from_parts(13_947_604, 0).saturating_mul(z.into()))
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into())))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into())))
@@ -714,194 +721,194 @@ impl WeightInfo for () {
 .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into()))
 .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into()))
 }
- /// Storage: Alliance Rule (r:0 w:1)
- /// Proof: Alliance Rule (max_values: Some(1), max_size: Some(87), added: 582, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Rule` (r:0 w:1)
+ /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`)
 fn set_rule() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 8_833_000 picoseconds.
- Weight::from_parts(9_313_000, 0)
+ // Minimum execution time: 6_156_000 picoseconds.
+ Weight::from_parts(6_560_000, 0)
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Alliance Announcements (r:1 w:1)
- /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Announcements` (r:1 w:1)
+ /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`)
 fn announce() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `246`
+ // Measured: `279`
 // Estimated: `10187`
- // Minimum execution time: 12_231_000 picoseconds.
- Weight::from_parts(12_761_000, 10187)
+ // Minimum execution time: 8_988_000 picoseconds.
+ Weight::from_parts(9_476_000, 10187)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Alliance Announcements (r:1 w:1)
- /// Proof: Alliance Announcements (max_values: Some(1), max_size: Some(8702), added: 9197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Announcements` (r:1 w:1)
+ /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`)
 fn remove_announcement() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `319`
+ // Measured: `352`
 // Estimated: `10187`
- // Minimum execution time: 13_079_000 picoseconds.
- Weight::from_parts(13_612_000, 10187)
+ // Minimum execution time: 10_126_000 picoseconds.
+ Weight::from_parts(10_755_000, 10187)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Alliance Members (r:3 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:0)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
- /// Storage: Alliance DepositOf (r:0 w:1)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Members` (r:3 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::DepositOf` (r:0 w:1)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
 fn join_alliance() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `468`
+ // Measured: `501`
 // Estimated: `18048`
- // Minimum execution time: 44_574_000 picoseconds.
- Weight::from_parts(46_157_000, 18048)
+ // Minimum execution time: 38_878_000 picoseconds.
+ Weight::from_parts(40_493_000, 18048)
 .saturating_add(RocksDbWeight::get().reads(5_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 }
- /// Storage: Alliance Members (r:3 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:0)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Members` (r:3 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
 fn nominate_ally() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `367`
+ // Measured: `400`
 // Estimated: `18048`
- // Minimum execution time: 26_114_000 picoseconds.
- Weight::from_parts(27_069_000, 18048)
+ // Minimum execution time: 23_265_000 picoseconds.
+ Weight::from_parts(24_703_000, 18048)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Alliance Members (r:2 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:2 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn elevate_ally() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `12362`
- // Minimum execution time: 25_882_000 picoseconds.
- Weight::from_parts(26_923_000, 12362)
+ // Minimum execution time: 23_049_000 picoseconds.
+ Weight::from_parts(23_875_000, 12362)
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
- /// Storage: Alliance Members (r:4 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Alliance RetiringMembers (r:0 w:1)
- /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
+ /// Storage: `Alliance::Members` (r:4 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Alliance::RetiringMembers` (r:0 w:1)
+ /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 fn give_retirement_notice() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `23734`
- // Minimum execution time: 34_112_000 picoseconds.
- Weight::from_parts(35_499_000, 23734)
+ // Minimum execution time: 29_124_000 picoseconds.
+ Weight::from_parts(30_369_000, 23734)
 .saturating_add(RocksDbWeight::get().reads(5_u64))
 .saturating_add(RocksDbWeight::get().writes(5_u64))
 }
- /// Storage: Alliance RetiringMembers (r:1 w:1)
- /// Proof: Alliance RetiringMembers (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen)
- /// Storage: Alliance Members (r:1 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: Alliance DepositOf (r:1 w:1)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+ /// Storage: `Alliance::RetiringMembers` (r:1 w:1)
+ /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::Members` (r:1 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::DepositOf` (r:1 w:1)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 fn retire() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `687`
+ // Measured: `720`
 // Estimated: `6676`
- // Minimum execution time: 41_239_000 picoseconds.
- Weight::from_parts(42_764_000, 6676)
+ // Minimum execution time: 36_376_000 picoseconds.
+ Weight::from_parts(38_221_000, 6676)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
- /// Storage: Alliance Members (r:3 w:1)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Alliance DepositOf (r:1 w:1)
- /// Proof: Alliance DepositOf (max_values: None, max_size: Some(64), added: 2539, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:3 w:1)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Alliance::DepositOf` (r:1 w:1)
+ /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn kick_member() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `707`
+ // Measured: `740`
 // Estimated: `18048`
- // Minimum execution time: 68_071_000 picoseconds.
- Weight::from_parts(71_808_000, 18048)
+ // Minimum execution time: 56_560_000 picoseconds.
+ Weight::from_parts(58_621_000, 18048)
 .saturating_add(RocksDbWeight::get().reads(6_u64))
 .saturating_add(RocksDbWeight::get().writes(5_u64))
 }
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:1)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousWebsites (r:1 w:1)
- /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[0, 100]`.
 /// The range of component `l` is `[0, 255]`.
 fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `246`
+ // Measured: `279`
 // Estimated: `27187`
- // Minimum execution time: 7_006_000 picoseconds.
- Weight::from_parts(7_253_000, 27187)
- // Standard Error: 3_403
- .saturating_add(Weight::from_parts(1_680_082, 0).saturating_mul(n.into()))
- // Standard Error: 1_333
- .saturating_add(Weight::from_parts(72_943, 0).saturating_mul(l.into()))
+ // Minimum execution time: 5_191_000 picoseconds.
+ Weight::from_parts(5_410_000, 27187)
+ // Standard Error: 3_215
+ .saturating_add(Weight::from_parts(1_018_569, 0).saturating_mul(n.into()))
+ // Standard Error: 1_259
+ .saturating_add(Weight::from_parts(68_712, 0).saturating_mul(l.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
- /// Storage: Alliance UnscrupulousAccounts (r:1 w:1)
- /// Proof: Alliance UnscrupulousAccounts (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen)
- /// Storage: Alliance UnscrupulousWebsites (r:1 w:1)
- /// Proof: Alliance UnscrupulousWebsites (max_values: Some(1), max_size: Some(25702), added: 26197, mode: MaxEncodedLen)
+ /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`)
+ /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1)
+ /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[0, 100]`.
 /// The range of component `l` is `[0, 255]`.
 fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0 + l * (100 ±0) + n * (289 ±0)`
 // Estimated: `27187`
- // Minimum execution time: 7_292_000 picoseconds.
- Weight::from_parts(7_629_000, 27187)
- // Standard Error: 176_225
- .saturating_add(Weight::from_parts(16_646_429, 0).saturating_mul(n.into()))
- // Standard Error: 69_017
- .saturating_add(Weight::from_parts(310_978, 0).saturating_mul(l.into()))
+ // Minimum execution time: 5_361_000 picoseconds.
+ Weight::from_parts(5_494_000, 27187)
+ // Standard Error: 181_133
+ .saturating_add(Weight::from_parts(16_322_982, 0).saturating_mul(n.into()))
+ // Standard Error: 70_940
+ .saturating_add(Weight::from_parts(343_581, 0).saturating_mul(l.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
- /// Storage: Alliance Members (r:3 w:2)
- /// Proof: Alliance Members (max_values: None, max_size: Some(3211), added: 5686, mode: MaxEncodedLen)
- /// Storage: AllianceMotion Proposals (r:1 w:0)
- /// Proof Skipped: AllianceMotion Proposals (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Members (r:0 w:1)
- /// Proof Skipped: AllianceMotion Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: AllianceMotion Prime (r:0 w:1)
- /// Proof Skipped: AllianceMotion Prime (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Alliance::Members` (r:3 w:2)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Members` (r:0 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 fn abdicate_fellow_status() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `443`
+ // Measured: `476`
 // Estimated: `18048`
- // Minimum execution time: 31_798_000 picoseconds.
- Weight::from_parts(33_463_000, 18048)
+ // Minimum execution time: 28_856_000 picoseconds.
+ Weight::from_parts(29_875_000, 18048)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(4_u64))
 }
 }
diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs
index a0e687f7a41..4eaa115c694 100644
--- a/substrate/frame/asset-conversion/src/weights.rs
+++ b/substrate/frame/asset-conversion/src/weights.rs
@@ -17,24 +17,28 @@
 //! Autogenerated weights for `pallet_asset_conversion`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-10-30, STEPS: `5`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: ``
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 // Executed Command:
-// ./target/debug/substrate-node
+// ./target/production/substrate-node
 // benchmark
 // pallet
 // --chain=dev
-// --steps=5
-// --repeat=2
-// --pallet=pallet-asset-conversion
+// --steps=50
+// --repeat=20
+// --pallet=pallet_asset_conversion
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
 // --output=./substrate/frame/asset-conversion/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
 // --template=./substrate/.maintain/frame-weight-template.hbs
 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -59,11 +63,9 @@ pub struct SubstrateWeight(PhantomData);
 impl WeightInfo for SubstrateWeight {
 /// Storage: `AssetConversion::Pools` (r:1 w:1)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
- /// Storage: `System::Account` (r:2 w:2)
+ /// Storage: `System::Account` (r:1 w:1)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
- /// Storage: `Assets::Account` (r:2 w:2)
- /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
- /// Storage: `Assets::Asset` (r:2 w:2)
+ /// Storage: `Assets::Asset` (r:2 w:0)
 /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1)
 /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -73,12 +75,12 @@ impl WeightInfo for SubstrateWeight {
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn create_pool() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1081`
+ // Measured: `910`
 // Estimated: `6360`
- // Minimum execution time: 1_576_000_000 picoseconds.
- Weight::from_parts(1_668_000_000, 6360)
- .saturating_add(T::DbWeight::get().reads(10_u64))
- .saturating_add(T::DbWeight::get().writes(10_u64))
+ // Minimum execution time: 82_941_000 picoseconds.
+ Weight::from_parts(85_526_000, 6360)
+ .saturating_add(T::DbWeight::get().reads(7_u64))
+ .saturating_add(T::DbWeight::get().writes(5_u64))
 }
 /// Storage: `AssetConversion::Pools` (r:1 w:0)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
@@ -86,18 +88,20 @@ impl WeightInfo for SubstrateWeight {
 /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `Assets::Account` (r:4 w:4)
 /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 /// Storage: `PoolAssets::Asset` (r:1 w:1)
 /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `PoolAssets::Account` (r:2 w:2)
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn add_liquidity() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1761`
+ // Measured: `1507`
 // Estimated: `11426`
- // Minimum execution time: 1_636_000_000 picoseconds.
- Weight::from_parts(1_894_000_000, 11426)
- .saturating_add(T::DbWeight::get().reads(10_u64))
- .saturating_add(T::DbWeight::get().writes(9_u64))
+ // Minimum execution time: 138_424_000 picoseconds.
+ Weight::from_parts(142_083_000, 11426)
+ .saturating_add(T::DbWeight::get().reads(11_u64))
+ .saturating_add(T::DbWeight::get().writes(10_u64))
 }
 /// Storage: `AssetConversion::Pools` (r:1 w:0)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
@@ -111,10 +115,10 @@ impl WeightInfo for SubstrateWeight {
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn remove_liquidity() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1750`
+ // Measured: `1650`
 // Estimated: `11426`
- // Minimum execution time: 1_507_000_000 picoseconds.
- Weight::from_parts(1_524_000_000, 11426)
+ // Minimum execution time: 122_132_000 picoseconds.
+ Weight::from_parts(125_143_000, 11426)
 .saturating_add(T::DbWeight::get().reads(9_u64))
 .saturating_add(T::DbWeight::get().writes(8_u64))
 }
@@ -125,12 +129,12 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `n` is `[2, 4]`.
 fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0 + n * (522 ±0)`
+ // Measured: `89 + n * (419 ±0)`
 // Estimated: `990 + n * (5218 ±0)`
- // Minimum execution time: 937_000_000 picoseconds.
- Weight::from_parts(941_000_000, 990)
- // Standard Error: 40_863_477
- .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into()))
+ // Minimum execution time: 77_183_000 picoseconds.
+ Weight::from_parts(78_581_000, 990)
+ // Standard Error: 306_918
+ .saturating_add(Weight::from_parts(10_581_054, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into())))
 .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into())))
 .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into()))
@@ -142,12 +146,12 @@ impl WeightInfo for SubstrateWeight {
 /// The range of component `n` is `[2, 4]`.
 fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0 + n * (522 ±0)`
+ // Measured: `89 + n * (419 ±0)`
 // Estimated: `990 + n * (5218 ±0)`
- // Minimum execution time: 935_000_000 picoseconds.
- Weight::from_parts(947_000_000, 990)
- // Standard Error: 46_904_620
- .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into()))
+ // Minimum execution time: 76_962_000 picoseconds.
+ Weight::from_parts(78_315_000, 990)
+ // Standard Error: 311_204
+ .saturating_add(Weight::from_parts(10_702_400, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into())))
 .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into())))
 .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into()))
@@ -158,11 +162,9 @@ impl WeightInfo for SubstrateWeight {
 impl WeightInfo for () {
 /// Storage: `AssetConversion::Pools` (r:1 w:1)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
- /// Storage: `System::Account` (r:2 w:2)
+ /// Storage: `System::Account` (r:1 w:1)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
- /// Storage: `Assets::Account` (r:2 w:2)
- /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
- /// Storage: `Assets::Asset` (r:2 w:2)
+ /// Storage: `Assets::Asset` (r:2 w:0)
 /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1)
 /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -172,12 +174,12 @@ impl WeightInfo for () {
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn create_pool() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1081`
+ // Measured: `910`
 // Estimated: `6360`
- // Minimum execution time: 1_576_000_000 picoseconds.
- Weight::from_parts(1_668_000_000, 6360)
- .saturating_add(RocksDbWeight::get().reads(10_u64))
- .saturating_add(RocksDbWeight::get().writes(10_u64))
+ // Minimum execution time: 82_941_000 picoseconds.
+ Weight::from_parts(85_526_000, 6360)
+ .saturating_add(RocksDbWeight::get().reads(7_u64))
+ .saturating_add(RocksDbWeight::get().writes(5_u64))
 }
 /// Storage: `AssetConversion::Pools` (r:1 w:0)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
@@ -185,18 +187,20 @@ impl WeightInfo for () {
 /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `Assets::Account` (r:4 w:4)
 /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 /// Storage: `PoolAssets::Asset` (r:1 w:1)
 /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
 /// Storage: `PoolAssets::Account` (r:2 w:2)
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn add_liquidity() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1761`
+ // Measured: `1507`
 // Estimated: `11426`
- // Minimum execution time: 1_636_000_000 picoseconds.
- Weight::from_parts(1_894_000_000, 11426)
- .saturating_add(RocksDbWeight::get().reads(10_u64))
- .saturating_add(RocksDbWeight::get().writes(9_u64))
+ // Minimum execution time: 138_424_000 picoseconds.
+ Weight::from_parts(142_083_000, 11426)
+ .saturating_add(RocksDbWeight::get().reads(11_u64))
+ .saturating_add(RocksDbWeight::get().writes(10_u64))
 }
 /// Storage: `AssetConversion::Pools` (r:1 w:0)
 /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
@@ -210,10 +214,10 @@ impl WeightInfo for () {
 /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
 fn remove_liquidity() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `1750`
+ // Measured: `1650`
 // Estimated: `11426`
- // Minimum execution time: 1_507_000_000 picoseconds.
- Weight::from_parts(1_524_000_000, 11426)
+ // Minimum execution time: 122_132_000 picoseconds.
+ Weight::from_parts(125_143_000, 11426)
 .saturating_add(RocksDbWeight::get().reads(9_u64))
 .saturating_add(RocksDbWeight::get().writes(8_u64))
 }
@@ -224,12 +228,12 @@ impl WeightInfo for () {
 /// The range of component `n` is `[2, 4]`.
 fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0 + n * (522 ±0)`
+ // Measured: `89 + n * (419 ±0)`
 // Estimated: `990 + n * (5218 ±0)`
- // Minimum execution time: 937_000_000 picoseconds.
- Weight::from_parts(941_000_000, 990)
- // Standard Error: 40_863_477
- .saturating_add(Weight::from_parts(205_862_068, 0).saturating_mul(n.into()))
+ // Minimum execution time: 77_183_000 picoseconds.
+ Weight::from_parts(78_581_000, 990)
+ // Standard Error: 306_918
+ .saturating_add(Weight::from_parts(10_581_054, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into())))
 .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into())))
 .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into()))
@@ -241,12 +245,12 @@ impl WeightInfo for () {
 /// The range of component `n` is `[2, 4]`.
 fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `0 + n * (522 ±0)`
+ // Measured: `89 + n * (419 ±0)`
 // Estimated: `990 + n * (5218 ±0)`
- // Minimum execution time: 935_000_000 picoseconds.
- Weight::from_parts(947_000_000, 990)
- // Standard Error: 46_904_620
- .saturating_add(Weight::from_parts(218_275_862, 0).saturating_mul(n.into()))
+ // Minimum execution time: 76_962_000 picoseconds.
+ Weight::from_parts(78_315_000, 990)
+ // Standard Error: 311_204
+ .saturating_add(Weight::from_parts(10_702_400, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into())))
 .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into())))
 .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into()))
diff --git a/substrate/frame/asset-rate/src/weights.rs b/substrate/frame/asset-rate/src/weights.rs
index 582e20e56d7..b8723ee3bed 100644
--- a/substrate/frame/asset-rate/src/weights.rs
+++ b/substrate/frame/asset-rate/src/weights.rs
@@ -15,16 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! Autogenerated weights for pallet_asset_rate
+//! Autogenerated weights for `pallet_asset_rate`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 // Executed Command:
-// ./target/production/substrate
+// ./target/production/substrate-node
 // benchmark
 // pallet
 // --chain=dev
@@ -35,12 +35,11 @@
 // --no-median-slopes
 // --no-min-squares
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./frame/asset-rate/src/weights.rs
-// --header=./HEADER-APACHE2
-// --template=./.maintain/frame-weight-template.hbs
+// --output=./substrate/frame/asset-rate/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
+// --template=./substrate/.maintain/frame-weight-template.hbs
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,83 +49,83 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
-/// Weight functions needed for pallet_asset_rate.
+/// Weight functions needed for `pallet_asset_rate`.
 pub trait WeightInfo {
 fn create() -> Weight;
 fn update() -> Weight;
 fn remove() -> Weight;
 }
-/// Weights for pallet_asset_rate using the Substrate node and recommended hardware.
+/// Weights for `pallet_asset_rate` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight(PhantomData);
 impl WeightInfo for SubstrateWeight {
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn create() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `76`
 // Estimated: `3501`
- // Minimum execution time: 11_700_000 picoseconds.
- Weight::from_parts(12_158_000, 3501)
+ // Minimum execution time: 9_447_000 picoseconds.
+ Weight::from_parts(10_078_000, 3501)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn update() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `137`
 // Estimated: `3501`
- // Minimum execution time: 12_119_000 picoseconds.
- Weight::from_parts(12_548_000, 3501)
+ // Minimum execution time: 9_844_000 picoseconds.
+ Weight::from_parts(10_240_000, 3501)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn remove() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `137`
 // Estimated: `3501`
- // Minimum execution time: 12_541_000 picoseconds.
- Weight::from_parts(12_956_000, 3501)
+ // Minimum execution time: 10_411_000 picoseconds.
+ Weight::from_parts(10_686_000, 3501)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
 }
-// For backwards compatibility and tests
+// For backwards compatibility and tests.
 impl WeightInfo for () {
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn create() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `76`
 // Estimated: `3501`
- // Minimum execution time: 11_700_000 picoseconds.
- Weight::from_parts(12_158_000, 3501)
+ // Minimum execution time: 9_447_000 picoseconds.
+ Weight::from_parts(10_078_000, 3501)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn update() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `137`
 // Estimated: `3501`
- // Minimum execution time: 12_119_000 picoseconds.
- Weight::from_parts(12_548_000, 3501)
+ // Minimum execution time: 9_844_000 picoseconds.
+ Weight::from_parts(10_240_000, 3501)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: AssetRate ConversionRateToNative (r:1 w:1)
- /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen)
+ /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1)
+ /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`)
 fn remove() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `137`
 // Estimated: `3501`
- // Minimum execution time: 12_541_000 picoseconds.
- Weight::from_parts(12_956_000, 3501)
+ // Minimum execution time: 10_411_000 picoseconds.
+ Weight::from_parts(10_686_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs index f20f7e317cf..f5199105fe3 100644 --- a/substrate/frame/assets/src/weights.rs +++ b/substrate/frame/assets/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_assets +//! Autogenerated weights for `pallet_assets` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/assets/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/assets/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_assets. +/// Weight functions needed for `pallet_assets`. pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -86,882 +85,890 @@ pub trait WeightInfo { fn block() -> Weight; } -/// Weights for pallet_assets using the Substrate node and recommended hardware. +/// Weights for `pallet_assets` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_340_000 picoseconds. - Weight::from_parts(31_977_000, 3675) + // Minimum execution time: 24_690_000 picoseconds. 
+ Weight::from_parts(25_878_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_342_000 picoseconds. - Weight::from_parts(13_782_000, 3675) + // Minimum execution time: 10_997_000 picoseconds. + Weight::from_parts(11_369_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn start_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 14_437_000 picoseconds. - Weight::from_parts(14_833_000, 3675) + // Minimum execution time: 11_536_000 picoseconds. + Weight::from_parts(12_309_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1001 w:1000) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1001 w:1000) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + c * (208 Β±0)` // Estimated: `3675 + c * (2609 Β±0)` - // Minimum execution time: 18_728_000 picoseconds. - Weight::from_parts(18_982_000, 3675) - // Standard Error: 11_708 - .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) + // Minimum execution time: 15_798_000 picoseconds. 
+ Weight::from_parts(16_005_000, 3675) + // Standard Error: 7_818 + .saturating_add(Weight::from_parts(12_826_278, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1001 w:1000) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1001 w:1000) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `522 + a * (86 Β±0)` // Estimated: `3675 + a * (2623 Β±0)` - // Minimum execution time: 18_611_000 picoseconds. - Weight::from_parts(18_970_000, 3675) - // Standard Error: 13_224 - .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) + // Minimum execution time: 16_334_000 picoseconds. + Weight::from_parts(16_616_000, 3675) + // Standard Error: 6_402 + .saturating_add(Weight::from_parts(13_502_238, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn finish_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_504_000 picoseconds. - Weight::from_parts(14_906_000, 3675) + // Minimum execution time: 12_278_000 picoseconds. 
+ Weight::from_parts(12_834_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_653_000 picoseconds. - Weight::from_parts(27_260_000, 3675) + // Minimum execution time: 21_793_000 picoseconds. + Weight::from_parts(22_284_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 33_625_000 picoseconds. - Weight::from_parts(34_474_000, 3675) + // Minimum execution time: 28_712_000 picoseconds. + Weight::from_parts(29_710_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_476_000, 6208) + // Minimum execution time: 41_331_000 picoseconds. 
+ Weight::from_parts(42_362_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 41_625_000 picoseconds. - Weight::from_parts(43_030_000, 6208) + // Minimum execution time: 37_316_000 picoseconds. + Weight::from_parts(38_200_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_661_000 picoseconds. - Weight::from_parts(48_469_000, 6208) + // Minimum execution time: 41_347_000 picoseconds. + Weight::from_parts(42_625_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_727_000 picoseconds. - Weight::from_parts(18_384_000, 3675) + // Minimum execution time: 15_072_000 picoseconds. 
+ Weight::from_parts(15_925_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_282_000, 3675) + // Minimum execution time: 14_991_000 picoseconds. + Weight::from_parts(15_987_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn freeze_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_743_000 picoseconds. - Weight::from_parts(14_193_000, 3675) + // Minimum execution time: 11_296_000 picoseconds. + Weight::from_parts(12_052_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn thaw_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_653_000 picoseconds. - Weight::from_parts(14_263_000, 3675) + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_791_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_328_000 picoseconds. - Weight::from_parts(16_042_000, 3675) + // Minimum execution time: 13_134_000 picoseconds. 
+ Weight::from_parts(13_401_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_097_000 picoseconds. - Weight::from_parts(14_641_000, 3675) + // Minimum execution time: 11_395_000 picoseconds. + Weight::from_parts(11_718_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_535_000 picoseconds. - Weight::from_parts(31_456_892, 3675) + // Minimum execution time: 25_147_000 picoseconds. + Weight::from_parts(26_614_272, 3675) + // Standard Error: 959 + .saturating_add(Weight::from_parts(2_300, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(31_930_000, 3675) + // Minimum execution time: 26_094_000 picoseconds. 
+ Weight::from_parts(27_199_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 14_660_000 picoseconds. - Weight::from_parts(15_718_387, 3675) - // Standard Error: 622 - .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) + // Minimum execution time: 11_977_000 picoseconds. + Weight::from_parts(12_719_933, 3675) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_239, 0).saturating_mul(n.into())) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_941, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_853_000 picoseconds. - Weight::from_parts(31_483_000, 3675) + // Minimum execution time: 25_859_000 picoseconds. + Weight::from_parts(26_654_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_asset_status() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_632_000 picoseconds. - Weight::from_parts(14_077_000, 3675) + // Minimum execution time: 10_965_000 picoseconds. 
+ Weight::from_parts(11_595_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 33_780_000 picoseconds. - Weight::from_parts(34_533_000, 3675) + // Minimum execution time: 28_427_000 picoseconds. + Weight::from_parts(29_150_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_approved() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 67_712_000 picoseconds. - Weight::from_parts(69_946_000, 6208) + // Minimum execution time: 60_227_000 picoseconds. + Weight::from_parts(61_839_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_668_000 picoseconds. - Weight::from_parts(37_637_000, 3675) + // Minimum execution time: 30_738_000 picoseconds. 
+ Weight::from_parts(31_988_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_685_000 picoseconds. - Weight::from_parts(37_950_000, 3675) + // Minimum execution time: 31_444_000 picoseconds. + Weight::from_parts(32_126_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_min_balance() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_466_000 picoseconds. - Weight::from_parts(14_924_000, 3675) + // Minimum execution time: 11_890_000 picoseconds. + Weight::from_parts(12_462_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn touch() -> Weight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 34_874_000 picoseconds. - Weight::from_parts(36_330_000, 3675) + // Minimum execution time: 30_675_000 picoseconds. 
+ Weight::from_parts(31_749_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn touch_other() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_104_000, 3675) + // Minimum execution time: 28_290_000 picoseconds. + Weight::from_parts(29_405_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn refund() -> Weight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 32_898_000 picoseconds. - Weight::from_parts(33_489_000, 3675) + // Minimum execution time: 30_195_000 picoseconds. + Weight::from_parts(31_105_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn refund_other() -> Weight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 31_243_000 picoseconds. - Weight::from_parts(31_909_000, 3675) + // Minimum execution time: 27_785_000 picoseconds. 
+ Weight::from_parts(28_783_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn block() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_692_000 picoseconds. - Weight::from_parts(18_253_000, 3675) + // Minimum execution time: 15_318_000 picoseconds. + Weight::from_parts(16_113_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 31_340_000 picoseconds. - Weight::from_parts(31_977_000, 3675) + // Minimum execution time: 24_690_000 picoseconds. + Weight::from_parts(25_878_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 13_342_000 picoseconds. - Weight::from_parts(13_782_000, 3675) + // Minimum execution time: 10_997_000 picoseconds. + Weight::from_parts(11_369_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn start_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 14_437_000 picoseconds. - Weight::from_parts(14_833_000, 3675) + // Minimum execution time: 11_536_000 picoseconds. 
+ Weight::from_parts(12_309_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1001 w:1000) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1001 w:1000) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + c * (208 Β±0)` // Estimated: `3675 + c * (2609 Β±0)` - // Minimum execution time: 18_728_000 picoseconds. - Weight::from_parts(18_982_000, 3675) - // Standard Error: 11_708 - .saturating_add(Weight::from_parts(14_363_570, 0).saturating_mul(c.into())) + // Minimum execution time: 15_798_000 picoseconds. + Weight::from_parts(16_005_000, 3675) + // Standard Error: 7_818 + .saturating_add(Weight::from_parts(12_826_278, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1001 w:1000) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1001 w:1000) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 1000]`. fn destroy_approvals(a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `522 + a * (86 Β±0)` // Estimated: `3675 + a * (2623 Β±0)` - // Minimum execution time: 18_611_000 picoseconds. - Weight::from_parts(18_970_000, 3675) - // Standard Error: 13_224 - .saturating_add(Weight::from_parts(16_397_299, 0).saturating_mul(a.into())) + // Minimum execution time: 16_334_000 picoseconds. 
+ Weight::from_parts(16_616_000, 3675) + // Standard Error: 6_402 + .saturating_add(Weight::from_parts(13_502_238, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn finish_destroy() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_504_000 picoseconds. - Weight::from_parts(14_906_000, 3675) + // Minimum execution time: 12_278_000 picoseconds. + Weight::from_parts(12_834_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_653_000 picoseconds. - Weight::from_parts(27_260_000, 3675) + // Minimum execution time: 21_793_000 picoseconds. + Weight::from_parts(22_284_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 33_625_000 picoseconds. - Weight::from_parts(34_474_000, 3675) + // Minimum execution time: 28_712_000 picoseconds. 
+ Weight::from_parts(29_710_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_609_000 picoseconds. - Weight::from_parts(48_476_000, 6208) + // Minimum execution time: 41_331_000 picoseconds. + Weight::from_parts(42_362_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 41_625_000 picoseconds. - Weight::from_parts(43_030_000, 6208) + // Minimum execution time: 37_316_000 picoseconds. 
+ Weight::from_parts(38_200_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 47_661_000 picoseconds. - Weight::from_parts(48_469_000, 6208) + // Minimum execution time: 41_347_000 picoseconds. + Weight::from_parts(42_625_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_727_000 picoseconds. - Weight::from_parts(18_384_000, 3675) + // Minimum execution time: 15_072_000 picoseconds. + Weight::from_parts(15_925_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_657_000 picoseconds. - Weight::from_parts(18_282_000, 3675) + // Minimum execution time: 14_991_000 picoseconds. 
+ Weight::from_parts(15_987_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn freeze_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_743_000 picoseconds. - Weight::from_parts(14_193_000, 3675) + // Minimum execution time: 11_296_000 picoseconds. + Weight::from_parts(12_052_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn thaw_asset() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 13_653_000 picoseconds. - Weight::from_parts(14_263_000, 3675) + // Minimum execution time: 11_446_000 picoseconds. + Weight::from_parts(11_791_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:0) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 15_328_000 picoseconds. - Weight::from_parts(16_042_000, 3675) + // Minimum execution time: 13_134_000 picoseconds. + Weight::from_parts(13_401_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_097_000 picoseconds. - Weight::from_parts(14_641_000, 3675) + // Minimum execution time: 11_395_000 picoseconds. 
+ Weight::from_parts(11_718_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { + fn set_metadata(n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_535_000 picoseconds. - Weight::from_parts(31_456_892, 3675) + // Minimum execution time: 25_147_000 picoseconds. + Weight::from_parts(26_614_272, 3675) + // Standard Error: 959 + .saturating_add(Weight::from_parts(2_300, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(31_930_000, 3675) + // Minimum execution time: 26_094_000 picoseconds. + Weight::from_parts(27_199_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + fn force_set_metadata(n: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 14_660_000 picoseconds. - Weight::from_parts(15_718_387, 3675) - // Standard Error: 622 - .saturating_add(Weight::from_parts(2_640, 0).saturating_mul(s.into())) + // Minimum execution time: 11_977_000 picoseconds. 
+ Weight::from_parts(12_719_933, 3675) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_239, 0).saturating_mul(n.into())) + // Standard Error: 429 + .saturating_add(Weight::from_parts(3_941, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) fn force_clear_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 30_853_000 picoseconds. - Weight::from_parts(31_483_000, 3675) + // Minimum execution time: 25_859_000 picoseconds. + Weight::from_parts(26_654_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn force_asset_status() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_632_000 picoseconds. - Weight::from_parts(14_077_000, 3675) + // Minimum execution time: 10_965_000 picoseconds. + Weight::from_parts(11_595_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 33_780_000 picoseconds. - Weight::from_parts(34_533_000, 3675) + // Minimum execution time: 28_427_000 picoseconds. 
+ Weight::from_parts(29_150_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_approved() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 67_712_000 picoseconds. - Weight::from_parts(69_946_000, 6208) + // Minimum execution time: 60_227_000 picoseconds. + Weight::from_parts(61_839_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_668_000 picoseconds. - Weight::from_parts(37_637_000, 3675) + // Minimum execution time: 30_738_000 picoseconds. + Weight::from_parts(31_988_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Approvals (r:1 w:1) - /// Proof: Assets Approvals (max_values: None, max_size: Some(148), added: 2623, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) fn force_cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 36_685_000 picoseconds. 
- Weight::from_parts(37_950_000, 3675) + // Minimum execution time: 31_444_000 picoseconds. + Weight::from_parts(32_126_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn set_min_balance() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 14_466_000 picoseconds. - Weight::from_parts(14_924_000, 3675) + // Minimum execution time: 11_890_000 picoseconds. + Weight::from_parts(12_462_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn touch() -> Weight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 34_874_000 picoseconds. - Weight::from_parts(36_330_000, 3675) + // Minimum execution time: 30_675_000 picoseconds. + Weight::from_parts(31_749_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn touch_other() -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_104_000, 3675) + // Minimum execution time: 28_290_000 picoseconds. 
+ Weight::from_parts(29_405_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn refund() -> Weight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 32_898_000 picoseconds. - Weight::from_parts(33_489_000, 3675) + // Minimum execution time: 30_195_000 picoseconds. + Weight::from_parts(31_105_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) fn refund_other() -> Weight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 31_243_000 picoseconds. - Weight::from_parts(31_909_000, 3675) + // Minimum execution time: 27_785_000 picoseconds. + Weight::from_parts(28_783_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Assets Asset (r:1 w:0) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn block() -> Weight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 17_692_000 picoseconds. - Weight::from_parts(18_253_000, 3675) + // Minimum execution time: 15_318_000 picoseconds. 
+ Weight::from_parts(16_113_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index b693f4fce9b..1ba0cdd0cc1 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -37,8 +37,9 @@ use sp_core::{ use sp_io; use sp_runtime::{ curve::PiecewiseLinear, + generic::UncheckedExtrinsic, impl_opaque_keys, - testing::{Digest, DigestItem, Header, TestXt}, + testing::{Digest, DigestItem, Header}, traits::{Header as _, OpaqueKeys}, BuildStorage, Perbill, }; @@ -74,7 +75,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } impl_opaque_keys! { diff --git a/substrate/frame/bags-list/src/weights.rs b/substrate/frame/bags-list/src/weights.rs index d929c6bb959..804b702cc9a 100644 --- a/substrate/frame/bags-list/src/weights.rs +++ b/substrate/frame/bags-list/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_bags_list +//! Autogenerated weights for `pallet_bags_list` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/bags-list/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/bags-list/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,123 +49,123 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_bags_list. +/// Weight functions needed for `pallet_bags_list`. pub trait WeightInfo { fn rebag_non_terminal() -> Weight; fn rebag_terminal() -> Weight; fn put_in_front_of() -> Weight; } -/// Weights for pallet_bags_list using the Substrate node and recommended hardware. +/// Weights for `pallet_bags_list` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1724` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 62_137_000 picoseconds. - Weight::from_parts(64_050_000, 11506) + // Minimum execution time: 55_856_000 picoseconds. + Weight::from_parts(59_022_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1618` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 60_880_000 picoseconds. - Weight::from_parts(62_078_000, 8877) + // Minimum execution time: 55_418_000 picoseconds. 
+ Weight::from_parts(57_352_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1930` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 68_911_000 picoseconds. - Weight::from_parts(70_592_000, 11506) + // Minimum execution time: 63_820_000 picoseconds. + Weight::from_parts(65_530_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1724` + // Measured: `1719` // Estimated: `11506` - // Minimum execution time: 62_137_000 picoseconds. - Weight::from_parts(64_050_000, 11506) + // Minimum execution time: 55_856_000 picoseconds. + Weight::from_parts(59_022_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:3 w:3) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:2 w:2) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1618` + // Measured: `1613` // Estimated: `8877` - // Minimum execution time: 60_880_000 picoseconds. - Weight::from_parts(62_078_000, 8877) + // Minimum execution time: 55_418_000 picoseconds. 
+ Weight::from_parts(57_352_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: VoterList ListNodes (r:4 w:4) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:2 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:2 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: `VoterList::ListNodes` (r:4 w:4) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1930` + // Measured: `1925` // Estimated: `11506` - // Minimum execution time: 68_911_000 picoseconds. - Weight::from_parts(70_592_000, 11506) + // Minimum execution time: 63_820_000 picoseconds. 
+ Weight::from_parts(65_530_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 64ae90c6757..db114290ed8 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -53,6 +53,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 46a4c4caefc..10bb79901f8 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -30,6 +30,7 @@ use frame_support::{ StorageNoopGuard, }; use frame_system::Event as SysEvent; +use sp_runtime::traits::DispatchTransaction; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -240,17 +241,17 @@ fn lock_should_work_reserve() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, @@ -271,17 +272,17 @@ fn lock_should_work_tx_fee() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 599909fa943..ee076a10fd1 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -37,7 +37,7 @@ use scale_info::TypeInfo; use sp_core::hexdisplay::HexDisplay; use sp_io; use sp_runtime::{ - traits::{BadOrigin, SignedExtension, Zero}, + traits::{BadOrigin, Zero}, ArithmeticError, BuildStorage, DispatchError, DispatchResult, FixedPointNumber, RuntimeDebug, TokenError, }; @@ -96,13 +96,13 @@ impl frame_system::Config for Test { type AccountData = super::AccountData; } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Test { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); } pub(crate) type Balance = u64; diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index f875ea189ba..6bcfe050af7 100644 --- a/substrate/frame/balances/src/weights.rs +++ 
b/substrate/frame/balances/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/balances/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,8 +71,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_329_000 picoseconds. - Weight::from_parts(47_297_000, 3593) + // Minimum execution time: 46_407_000 picoseconds. + Weight::from_parts(47_561_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -80,8 +82,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 36_187_000 picoseconds. - Weight::from_parts(36_900_000, 3593) + // Minimum execution time: 36_574_000 picoseconds. + Weight::from_parts(37_682_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,8 +93,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 13_498_000 picoseconds. - Weight::from_parts(14_143_000, 3593) + // Minimum execution time: 13_990_000 picoseconds. + Weight::from_parts(14_568_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_756_000 picoseconds. - Weight::from_parts(19_553_000, 3593) + // Minimum execution time: 19_594_000 picoseconds. + Weight::from_parts(20_148_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 47_826_000 picoseconds. - Weight::from_parts(48_834_000, 6196) + // Minimum execution time: 47_945_000 picoseconds. 
+ Weight::from_parts(49_363_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_621_000 picoseconds. - Weight::from_parts(45_151_000, 3593) + // Minimum execution time: 45_205_000 picoseconds. + Weight::from_parts(45_952_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,8 +137,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_194_000 picoseconds. - Weight::from_parts(16_945_000, 3593) + // Minimum execution time: 16_593_000 picoseconds. + Weight::from_parts(17_123_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -147,10 +149,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 15_782_000 picoseconds. - Weight::from_parts(16_118_000, 990) - // Standard Error: 10_499 - .saturating_add(Weight::from_parts(13_327_660, 0).saturating_mul(u.into())) + // Minimum execution time: 16_182_000 picoseconds. + Weight::from_parts(16_412_000, 990) + // Standard Error: 10_148 + .saturating_add(Weight::from_parts(13_382_459, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -159,8 +161,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_157_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_478_000 picoseconds. + Weight::from_parts(6_830_000, 0) } } @@ -172,8 +174,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 46_329_000 picoseconds. - Weight::from_parts(47_297_000, 3593) + // Minimum execution time: 46_407_000 picoseconds. + Weight::from_parts(47_561_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -183,8 +185,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 36_187_000 picoseconds. - Weight::from_parts(36_900_000, 3593) + // Minimum execution time: 36_574_000 picoseconds. + Weight::from_parts(37_682_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -194,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 13_498_000 picoseconds. - Weight::from_parts(14_143_000, 3593) + // Minimum execution time: 13_990_000 picoseconds. + Weight::from_parts(14_568_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -205,8 +207,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_756_000 picoseconds. - Weight::from_parts(19_553_000, 3593) + // Minimum execution time: 19_594_000 picoseconds.
+ Weight::from_parts(20_148_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -216,8 +218,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 47_826_000 picoseconds. - Weight::from_parts(48_834_000, 6196) + // Minimum execution time: 47_945_000 picoseconds. + Weight::from_parts(49_363_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -227,8 +229,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 44_621_000 picoseconds. - Weight::from_parts(45_151_000, 3593) + // Minimum execution time: 45_205_000 picoseconds. + Weight::from_parts(45_952_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -238,8 +240,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 16_194_000 picoseconds. - Weight::from_parts(16_945_000, 3593) + // Minimum execution time: 16_593_000 picoseconds. + Weight::from_parts(17_123_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -250,10 +252,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 15_782_000 picoseconds. - Weight::from_parts(16_118_000, 990) - // Standard Error: 10_499 - .saturating_add(Weight::from_parts(13_327_660, 0).saturating_mul(u.into())) + // Minimum execution time: 16_182_000 picoseconds. + Weight::from_parts(16_412_000, 990) + // Standard Error: 10_148 + .saturating_add(Weight::from_parts(13_382_459, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -262,7 +264,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_157_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_478_000 picoseconds. + Weight::from_parts(6_830_000, 0) } } diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 9cce479890a..5d963d309a4 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -29,8 +29,8 @@ use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_io::TestExternalities; use sp_runtime::{ - app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, - traits::OpaqueKeys, BuildStorage, Perbill, + app_crypto::ecdsa::Public, curve::PiecewiseLinear, generic::UncheckedExtrinsic, + impl_opaque_keys, traits::OpaqueKeys, BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -73,7 +73,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } parameter_types!
{ diff --git a/substrate/frame/benchmarking/src/weights.rs b/substrate/frame/benchmarking/src/weights.rs index 13d73e420cc..76f9a3fe52b 100644 --- a/substrate/frame/benchmarking/src/weights.rs +++ b/substrate/frame/benchmarking/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for frame_benchmarking +//! Autogenerated weights for `frame_benchmarking` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/benchmarking/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/benchmarking/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for frame_benchmarking. +/// Weight functions needed for `frame_benchmarking`. pub trait WeightInfo { fn addition(i: u32, ) -> Weight; fn subtraction(i: u32, ) -> Weight; @@ -60,7 +59,7 @@ pub trait WeightInfo { fn sr25519_verification(i: u32, ) -> Weight; } -/// Weights for frame_benchmarking using the Substrate node and recommended hardware. +/// Weights for `frame_benchmarking` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1000000]`. @@ -68,101 +67,101 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(185_656, 0) + // Minimum execution time: 150_000 picoseconds. + Weight::from_parts(190_440, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 146_000 picoseconds. - Weight::from_parts(189_816, 0) + // Minimum execution time: 151_000 picoseconds. + Weight::from_parts(201_397, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 148_000 picoseconds. - Weight::from_parts(202_367, 0) + // Minimum execution time: 152_000 picoseconds. + Weight::from_parts(187_862, 0) } /// The range of component `i` is `[0, 1000000]`. 
fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 143_000 picoseconds. - Weight::from_parts(189_693, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(215_191, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_167_071_000 picoseconds. - Weight::from_parts(24_391_749_000, 0) + // Minimum execution time: 25_432_823_000 picoseconds. + Weight::from_parts(25_568_846_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(2_998_013, 0) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) + // Minimum execution time: 193_000 picoseconds. + Weight::from_parts(3_846_690, 0) + // Standard Error: 6_694 + .saturating_add(Weight::from_parts(40_647_582, 0).saturating_mul(i.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(185_656, 0) + // Minimum execution time: 150_000 picoseconds. + Weight::from_parts(190_440, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 146_000 picoseconds. - Weight::from_parts(189_816, 0) + // Minimum execution time: 151_000 picoseconds. + Weight::from_parts(201_397, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 148_000 picoseconds. - Weight::from_parts(202_367, 0) + // Minimum execution time: 152_000 picoseconds. + Weight::from_parts(187_862, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 143_000 picoseconds. - Weight::from_parts(189_693, 0) + // Minimum execution time: 146_000 picoseconds. + Weight::from_parts(215_191, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 24_167_071_000 picoseconds. - Weight::from_parts(24_391_749_000, 0) + // Minimum execution time: 25_432_823_000 picoseconds. + Weight::from_parts(25_568_846_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 231_000 picoseconds. - Weight::from_parts(2_998_013, 0) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(55_456_705, 0).saturating_mul(i.into())) + // Minimum execution time: 193_000 picoseconds. 
+ Weight::from_parts(3_846_690, 0) + // Standard Error: 6_694 + .saturating_add(Weight::from_parts(40_647_582, 0).saturating_mul(i.into())) } } diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index a172d15b56c..1f0e38b6702 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_bounties +//! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/bounties/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/bounties/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_bounties. +/// Weight functions needed for `pallet_bounties`. pub trait WeightInfo { fn propose_bounty(d: u32, ) -> Weight; fn approve_bounty() -> Weight; @@ -65,169 +64,169 @@ pub trait WeightInfo { fn spend_funds(b: u32, ) -> Weight; } -/// Weights for pallet_bounties using the Substrate node and recommended hardware. +/// Weights for `pallet_bounties` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `3593` - // Minimum execution time: 29_384_000 picoseconds. - Weight::from_parts(30_820_018, 3593) - // Standard Error: 298 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) + // Minimum execution time: 24_286_000 picoseconds. + Weight::from_parts(25_657_314, 3593) + // Standard Error: 215 + .saturating_add(Weight::from_parts(1_116, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `368` + // Measured: `401` // Estimated: `3642` - // Minimum execution time: 10_873_000 picoseconds. - Weight::from_parts(11_421_000, 3642) + // Minimum execution time: 12_526_000 picoseconds. 
+ Weight::from_parts(13_373_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `388` + // Measured: `421` // Estimated: `3642` - // Minimum execution time: 9_181_000 picoseconds. - Weight::from_parts(9_726_000, 3642) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_340_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `564` + // Measured: `597` // Estimated: `3642` - // Minimum execution time: 30_257_000 picoseconds. - Weight::from_parts(30_751_000, 3642) + // Minimum execution time: 27_183_000 picoseconds. + Weight::from_parts(28_250_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `593` // Estimated: `3642` - // Minimum execution time: 27_850_000 picoseconds. - Weight::from_parts(28_821_000, 3642) + // Minimum execution time: 26_775_000 picoseconds. 
+ Weight::from_parts(27_667_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `572` + // Measured: `605` // Estimated: `3642` - // Minimum execution time: 19_164_000 picoseconds. - Weight::from_parts(20_136_000, 3642) + // Minimum execution time: 16_089_000 picoseconds. + Weight::from_parts(16_909_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `969` // Estimated: `8799` - // Minimum execution time: 120_235_000 picoseconds. - Weight::from_parts(121_673_000, 8799) + // Minimum execution time: 104_973_000 picoseconds. 
+ Weight::from_parts(107_696_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `616` + // Measured: `649` // Estimated: `3642` - // Minimum execution time: 35_713_000 picoseconds. - Weight::from_parts(37_174_000, 3642) + // Minimum execution time: 30_702_000 picoseconds. + Weight::from_parts(32_615_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `852` + // Measured: `885` // Estimated: `6196` - // Minimum execution time: 81_037_000 picoseconds. - Weight::from_parts(83_294_000, 6196) + // Minimum execution time: 72_055_000 picoseconds. 
+ Weight::from_parts(73_900_000, 6196)
 .saturating_add(T::DbWeight::get().reads(4_u64))
 .saturating_add(T::DbWeight::get().writes(4_u64))
 }
- /// Storage: Bounties Bounties (r:1 w:1)
- /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen)
+ /// Storage: `Bounties::Bounties` (r:1 w:1)
+ /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`)
 fn extend_bounty_expiry() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `424`
+ // Measured: `457`
 // Estimated: `3642`
- // Minimum execution time: 15_348_000 picoseconds.
- Weight::from_parts(15_776_000, 3642)
+ // Minimum execution time: 12_057_000 picoseconds.
+ Weight::from_parts(12_744_000, 3642)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: Bounties BountyApprovals (r:1 w:1)
- /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen)
- /// Storage: Bounties Bounties (r:100 w:100)
- /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen)
- /// Storage: System Account (r:200 w:200)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+ /// Storage: `Bounties::BountyApprovals` (r:1 w:1)
+ /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`)
+ /// Storage: `Bounties::Bounties` (r:100 w:100)
+ /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:200 w:200)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 /// The range of component `b` is `[0, 100]`.
 fn spend_funds(b: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `4 + b * (297 ±0)`
+ // Measured: `37 + b * (297 ±0)`
 // Estimated: `1887 + b * (5206 ±0)`
- // Minimum execution time: 5_082_000 picoseconds.
- Weight::from_parts(5_126_000, 1887)
- // Standard Error: 21_949
- .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into()))
+ // Minimum execution time: 3_489_000 picoseconds.
+ Weight::from_parts(8_384_129, 1887)
+ // Standard Error: 18_066
+ .saturating_add(Weight::from_parts(31_612_331, 0).saturating_mul(b.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into())))
 .saturating_add(T::DbWeight::get().writes(1_u64))
@@ -236,168 +235,168 @@ impl WeightInfo for SubstrateWeight {
 }
 }
-// For backwards compatibility and tests
+// For backwards compatibility and tests.
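The `()` implementation that follows mirrors `SubstrateWeight<T>` function for function, but charges database accesses through the hard-coded `RocksDbWeight` constants, so tests and mock runtimes without benchmarked weights can still produce sensible values. A minimal sketch of the shape every generated function above follows, assuming only `frame_support`; the `EXAMPLE_READ`/`EXAMPLE_WRITE` constants and their numbers are illustrative placeholders, not values taken from this patch:

    // Sketch of the generated weight shape: a benchmarked base weight
    // (ref_time in picoseconds, proof size in bytes) plus the storage
    // operations listed in the `Storage:`/`Proof:` doc comments.
    use frame_support::weights::Weight;

    // Assumed per-operation database costs; a real runtime takes these
    // from its configured `DbWeight` (e.g. `RocksDbWeight`).
    const EXAMPLE_READ: Weight = Weight::from_parts(25_000_000, 0);
    const EXAMPLE_WRITE: Weight = Weight::from_parts(100_000_000, 0);

    // Mirrors `approve_bounty` above: base weight plus 2 reads and 2 writes.
    fn example_approve_bounty_weight() -> Weight {
        Weight::from_parts(13_373_000, 3642)
            .saturating_add(EXAMPLE_READ.saturating_mul(2))
            .saturating_add(EXAMPLE_WRITE.saturating_mul(2))
    }

A runtime selects between the two implementations once, in the pallet's `Config`, typically `type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;` in production and `type WeightInfo = ();` in tests (the type name `Runtime` here is illustrative).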
impl WeightInfo for () { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `3593` - // Minimum execution time: 29_384_000 picoseconds. - Weight::from_parts(30_820_018, 3593) - // Standard Error: 298 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(d.into())) + // Minimum execution time: 24_286_000 picoseconds. + Weight::from_parts(25_657_314, 3593) + // Standard Error: 215 + .saturating_add(Weight::from_parts(1_116, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `368` + // Measured: `401` // Estimated: `3642` - // Minimum execution time: 10_873_000 picoseconds. - Weight::from_parts(11_421_000, 3642) + // Minimum execution time: 12_526_000 picoseconds. + Weight::from_parts(13_373_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `388` + // Measured: `421` // Estimated: `3642` - // Minimum execution time: 9_181_000 picoseconds. 
- Weight::from_parts(9_726_000, 3642) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_340_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `564` + // Measured: `597` // Estimated: `3642` - // Minimum execution time: 30_257_000 picoseconds. - Weight::from_parts(30_751_000, 3642) + // Minimum execution time: 27_183_000 picoseconds. + Weight::from_parts(28_250_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `593` // Estimated: `3642` - // Minimum execution time: 27_850_000 picoseconds. - Weight::from_parts(28_821_000, 3642) + // Minimum execution time: 26_775_000 picoseconds. + Weight::from_parts(27_667_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `572` + // Measured: `605` // Estimated: `3642` - // Minimum execution time: 19_164_000 picoseconds. - Weight::from_parts(20_136_000, 3642) + // Minimum execution time: 16_089_000 picoseconds. 
+ Weight::from_parts(16_909_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `936` + // Measured: `969` // Estimated: `8799` - // Minimum execution time: 120_235_000 picoseconds. - Weight::from_parts(121_673_000, 8799) + // Minimum execution time: 104_973_000 picoseconds. + Weight::from_parts(107_696_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `616` + // Measured: `649` // Estimated: `3642` - // Minimum execution time: 35_713_000 picoseconds. - Weight::from_parts(37_174_000, 3642) + // Minimum execution time: 30_702_000 picoseconds. 
+ Weight::from_parts(32_615_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `852` + // Measured: `885` // Estimated: `6196` - // Minimum execution time: 81_037_000 picoseconds. - Weight::from_parts(83_294_000, 6196) + // Minimum execution time: 72_055_000 picoseconds. + Weight::from_parts(73_900_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `424` + // Measured: `457` // Estimated: `3642` - // Minimum execution time: 15_348_000 picoseconds. - Weight::from_parts(15_776_000, 3642) + // Minimum execution time: 12_057_000 picoseconds. 
+ Weight::from_parts(12_744_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:100 w:100) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:200 w:200) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4 + b * (297 Β±0)` + // Measured: `37 + b * (297 Β±0)` // Estimated: `1887 + b * (5206 Β±0)` - // Minimum execution time: 5_082_000 picoseconds. - Weight::from_parts(5_126_000, 1887) - // Standard Error: 21_949 - .saturating_add(Weight::from_parts(42_635_308, 0).saturating_mul(b.into())) + // Minimum execution time: 3_489_000 picoseconds. + Weight::from_parts(8_384_129, 1887) + // Standard Error: 18_066 + .saturating_add(Weight::from_parts(31_612_331, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index a8f50eeee6e..133ea63e35f 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_broker` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-pzhd7p6z-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_broker +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_broker -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/broker/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -87,8 +89,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_701_000 picoseconds. + Weight::from_parts(2_902_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -97,8 +99,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_056_000 picoseconds. + Weight::from_parts(19_093_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -108,8 +110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_233_000 picoseconds. + Weight::from_parts(17_788_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -119,8 +121,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 9_740_000 picoseconds. + Weight::from_parts(10_504_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,14 +141,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 49_728_000 picoseconds. + Weight::from_parts(52_765_861, 8499) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -162,10 +162,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. 
- Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 41_986_000 picoseconds. + Weight::from_parts(43_465_000, 2120) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 61_779_000 picoseconds. + Weight::from_parts(62_563_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -198,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 16_962_000 picoseconds. + Weight::from_parts(17_733_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,21 +209,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(19_105_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_115_000 picoseconds. + Weight::from_parts(20_741_000, 3550) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -237,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_339_000 picoseconds. + Weight::from_parts(32_639_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 37_542_000 picoseconds. + Weight::from_parts(38_521_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -272,10 +272,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 Β±0)` - // Minimum execution time: 75_734_000 picoseconds. 
- Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 66_176_000 picoseconds. + Weight::from_parts(68_356_745, 6196) + // Standard Error: 68_008 + .saturating_add(Weight::from_parts(1_558_419, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -287,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 41_130_000 picoseconds. + Weight::from_parts(41_914_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_042_000 picoseconds. + Weight::from_parts(34_087_000, 3550) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 39_116_000 picoseconds. + Weight::from_parts(39_990_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -330,10 +330,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 47_547_000 picoseconds. + Weight::from_parts(50_274_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,34 +343,32 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_707_000 picoseconds. + Weight::from_parts(27_217_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_651_000 picoseconds. 
+ Weight::from_parts(5_231_385, 0) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_264_002, 1487) + // Standard Error: 21 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -386,10 +384,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 48_297_000 picoseconds. + Weight::from_parts(49_613_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -408,10 +406,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 36_715_000 picoseconds. + Weight::from_parts(38_580_380, 8499) + // Standard Error: 91 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -423,8 +421,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_932_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -436,8 +434,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_082_000 picoseconds. + Weight::from_parts(17_662_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,28 +443,35 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 175_000 picoseconds. 
+ Weight::from_parts(223_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - T::DbWeight::get().reads_writes(1, 1) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_432_000 picoseconds. + Weight::from_parts(2_536_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_080_000 picoseconds. + Weight::from_parts(13_937_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -478,8 +483,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_344_000, 0) + // Minimum execution time: 2_701_000 picoseconds. + Weight::from_parts(2_902_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -488,8 +493,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 21_259_000 picoseconds. - Weight::from_parts(22_110_000, 7496) + // Minimum execution time: 18_056_000 picoseconds. + Weight::from_parts(19_093_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,8 +504,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 20_330_000 picoseconds. - Weight::from_parts(20_826_000, 7496) + // Minimum execution time: 17_233_000 picoseconds. + Weight::from_parts(17_788_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -510,8 +515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 13_411_000 picoseconds. - Weight::from_parts(13_960_000, 1526) + // Minimum execution time: 9_740_000 picoseconds. 
+ Weight::from_parts(10_504_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -530,14 +535,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn start_sales(n: u32, ) -> Weight { + fn start_sales(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 57_770_000 picoseconds. - Weight::from_parts(61_047_512, 8499) - // Standard Error: 165 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(n.into())) + // Minimum execution time: 49_728_000 picoseconds. + Weight::from_parts(52_765_861, 8499) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -553,10 +556,10 @@ impl WeightInfo for () { /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `568` - // Estimated: `2053` - // Minimum execution time: 51_196_000 picoseconds. - Weight::from_parts(52_382_000, 2053) + // Measured: `635` + // Estimated: `2120` + // Minimum execution time: 41_986_000 picoseconds. + Weight::from_parts(43_465_000, 2120) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -576,10 +579,10 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `686` + // Measured: `753` // Estimated: `4698` - // Minimum execution time: 71_636_000 picoseconds. - Weight::from_parts(73_679_000, 4698) + // Minimum execution time: 61_779_000 picoseconds. + Weight::from_parts(62_563_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,8 +592,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 19_182_000 picoseconds. - Weight::from_parts(19_775_000, 3550) + // Minimum execution time: 16_962_000 picoseconds. + Weight::from_parts(17_733_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,21 +603,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 20_688_000 picoseconds. - Weight::from_parts(21_557_000, 3550) + // Minimum execution time: 18_380_000 picoseconds. + Weight::from_parts(19_105_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Broker::Regions` (r:1 w:2) + /// Storage: `Broker::Regions` (r:1 w:3) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3550` - // Minimum execution time: 21_190_000 picoseconds. - Weight::from_parts(22_215_000, 3550) + // Minimum execution time: 20_115_000 picoseconds. 
+ Weight::from_parts(20_741_000, 3550) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -628,8 +631,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `740` // Estimated: `4681` - // Minimum execution time: 34_591_000 picoseconds. - Weight::from_parts(36_227_000, 4681) + // Minimum execution time: 31_339_000 picoseconds. + Weight::from_parts(32_639_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -647,8 +650,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `775` // Estimated: `5996` - // Minimum execution time: 40_346_000 picoseconds. - Weight::from_parts(41_951_000, 5996) + // Minimum execution time: 37_542_000 picoseconds. + Weight::from_parts(38_521_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -663,10 +666,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 Β±0)` - // Minimum execution time: 75_734_000 picoseconds. - Weight::from_parts(78_168_395, 6196) - // Standard Error: 63_180 - .saturating_add(Weight::from_parts(1_076_259, 0).saturating_mul(m.into())) + // Minimum execution time: 66_176_000 picoseconds. + Weight::from_parts(68_356_745, 6196) + // Standard Error: 68_008 + .saturating_add(Weight::from_parts(1_558_419, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -678,8 +681,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 46_383_000 picoseconds. - Weight::from_parts(47_405_000, 3593) + // Minimum execution time: 41_130_000 picoseconds. + Weight::from_parts(41_914_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -691,8 +694,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `3550` - // Minimum execution time: 30_994_000 picoseconds. - Weight::from_parts(31_979_000, 3550) + // Minimum execution time: 31_042_000 picoseconds. + Weight::from_parts(34_087_000, 3550) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -706,8 +709,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(44_010_000, 3533) + // Minimum execution time: 39_116_000 picoseconds. + Weight::from_parts(39_990_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -721,10 +724,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `830` + // Measured: `995` // Estimated: `3593` - // Minimum execution time: 45_266_000 picoseconds. - Weight::from_parts(48_000_000, 3593) + // Minimum execution time: 47_547_000 picoseconds. 
+ Weight::from_parts(50_274_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -734,34 +737,32 @@ impl WeightInfo for () { /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: - // Measured: `525` + // Measured: `661` // Estimated: `4698` - // Minimum execution time: 25_365_000 picoseconds. - Weight::from_parts(26_920_000, 4698) + // Minimum execution time: 26_707_000 picoseconds. + Weight::from_parts(27_217_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_519_000 picoseconds. - Weight::from_parts(7_098_698, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(8, 0).saturating_mul(n.into())) + // Minimum execution time: 4_651_000 picoseconds. + Weight::from_parts(5_231_385, 0) } - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn process_core_count(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `3563` - // Minimum execution time: 7_608_000 picoseconds. - Weight::from_parts(8_157_815, 3563) - // Standard Error: 26 - .saturating_add(Weight::from_parts(48, 0).saturating_mul(n.into())) + // Measured: `404` + // Estimated: `1487` + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_264_002, 1487) + // Standard Error: 21 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -777,10 +778,10 @@ impl WeightInfo for () { /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `905` - // Estimated: `4370` - // Minimum execution time: 59_993_000 picoseconds. - Weight::from_parts(61_752_000, 4370) + // Measured: `972` + // Estimated: `4437` + // Minimum execution time: 48_297_000 picoseconds. + Weight::from_parts(49_613_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -799,10 +800,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 41_863_000 picoseconds. - Weight::from_parts(44_033_031, 8499) - // Standard Error: 116 - .saturating_add(Weight::from_parts(764, 0).saturating_mul(n.into())) + // Minimum execution time: 36_715_000 picoseconds. + Weight::from_parts(38_580_380, 8499) + // Standard Error: 91 + .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -814,8 +815,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 9_588_000 picoseconds. 
- Weight::from_parts(9_925_000, 3493) + // Minimum execution time: 7_564_000 picoseconds. + Weight::from_parts(7_932_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -827,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 19_308_000 picoseconds. - Weight::from_parts(20_482_000, 4681) + // Minimum execution time: 17_082_000 picoseconds. + Weight::from_parts(17_662_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -836,28 +837,34 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 147_000 picoseconds. - Weight::from_parts(184_000, 0) + // Minimum execution time: 175_000 picoseconds. + Weight::from_parts(223_000, 0) } + /// Storage: `Broker::CoreCountInbox` (r:0 w:1) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) fn notify_core_count() -> Weight { - RocksDbWeight::get().reads(1) - .saturating_add(RocksDbWeight::get().writes(1)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_432_000 picoseconds. + Weight::from_parts(2_536_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x18194fcb5c1fcace44d2d0a004272614` (r:1 w:1) + /// Storage: `Broker::CoreCountInbox` (r:1 w:0) + /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `699` - // Estimated: `4164` - // Minimum execution time: 19_824_000 picoseconds. - Weight::from_parts(20_983_000, 4164) + // Measured: `603` + // Estimated: `4068` + // Minimum execution time: 13_080_000 picoseconds. + Weight::from_parts(13_937_000, 4068) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/child-bounties/src/weights.rs b/substrate/frame/child-bounties/src/weights.rs index e4c1f238e88..6427517b45b 100644 --- a/substrate/frame/child-bounties/src/weights.rs +++ b/substrate/frame/child-bounties/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_child_bounties +//! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/child-bounties/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/child-bounties/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_child_bounties. +/// Weight functions needed for `pallet_child_bounties`. pub trait WeightInfo { fn add_child_bounty(d: u32, ) -> Weight; fn propose_curator() -> Weight; @@ -62,288 +61,288 @@ pub trait WeightInfo { fn close_child_bounty_active() -> Weight; } -/// Weights for pallet_child_bounties using the Substrate node and recommended hardware. +/// Weights for `pallet_child_bounties` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyCount (r:1 w:1) - /// Proof: ChildBounties ChildBountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:0 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: 
`MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `712` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 69_805_000 picoseconds. - Weight::from_parts(73_216_717, 6196) + // Minimum execution time: 60_674_000 picoseconds. + Weight::from_parts(63_477_428, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `766` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 18_190_000 picoseconds. - Weight::from_parts(18_932_000, 3642) + // Minimum execution time: 17_754_000 picoseconds. + Weight::from_parts(18_655_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 35_035_000 picoseconds. - Weight::from_parts(35_975_000, 3642) + // Minimum execution time: 31_580_000 picoseconds. 
+ Weight::from_parts(32_499_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 37_636_000 picoseconds. - Weight::from_parts(38_610_000, 3642) + // Minimum execution time: 33_536_000 picoseconds. + Weight::from_parts(34_102_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 22_457_000 picoseconds. - Weight::from_parts(23_691_000, 3642) + // Minimum execution time: 18_295_000 picoseconds. 
+ Weight::from_parts(19_496_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: // Measured: `682` // Estimated: `8799` - // Minimum execution time: 118_272_000 picoseconds. - Weight::from_parts(121_646_000, 8799) + // Minimum execution time: 102_367_000 picoseconds. 
+ Weight::from_parts(104_352_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1012` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 75_717_000 picoseconds. - Weight::from_parts(77_837_000, 6196) + // Minimum execution time: 69_051_000 picoseconds. 
+ Weight::from_parts(71_297_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1199` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 94_215_000 picoseconds. - Weight::from_parts(97_017_000, 8799) + // Minimum execution time: 84_053_000 picoseconds. + Weight::from_parts(86_072_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
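Each regenerated function above composes a base `Weight::from_parts(ref_time_ps, proof_size_bytes)` with priced storage reads and writes. A minimal standalone sketch of that composition, reusing the `propose_curator` figures from the impl above (the helper name and `main` are illustrative only, not part of this patch, and the default `RocksDbWeight` constants are assumed):

use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

// Recompose the regenerated `propose_curator()` weight for the RocksDb backend:
// a base ref_time/proof_size pair plus 3 priced reads and 2 priced writes.
fn propose_curator_weight() -> Weight {
    Weight::from_parts(18_655_000, 3642)
        .saturating_add(RocksDbWeight::get().reads(3_u64))
        .saturating_add(RocksDbWeight::get().writes(2_u64))
}

fn main() {
    let w = propose_curator_weight();
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}
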
impl WeightInfo for () { - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyCount (r:1 w:1) - /// Proof: ChildBounties ChildBountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:0 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. fn add_child_bounty(_d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `712` + // Measured: `745` // Estimated: `6196` - // Minimum execution time: 69_805_000 picoseconds. - Weight::from_parts(73_216_717, 6196) + // Minimum execution time: 60_674_000 picoseconds. 
+ Weight::from_parts(63_477_428, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `766` + // Measured: `799` // Estimated: `3642` - // Minimum execution time: 18_190_000 picoseconds. - Weight::from_parts(18_932_000, 3642) + // Minimum execution time: 17_754_000 picoseconds. + Weight::from_parts(18_655_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 35_035_000 picoseconds. - Weight::from_parts(35_975_000, 3642) + // Minimum execution time: 31_580_000 picoseconds. 
+ Weight::from_parts(32_499_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `912` + // Measured: `945` // Estimated: `3642` - // Minimum execution time: 37_636_000 picoseconds. - Weight::from_parts(38_610_000, 3642) + // Minimum execution time: 33_536_000 picoseconds. + Weight::from_parts(34_102_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 22_457_000 picoseconds. - Weight::from_parts(23_691_000, 3642) + // Minimum execution time: 18_295_000 picoseconds. 
+ Weight::from_parts(19_496_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: // Measured: `682` // Estimated: `8799` - // Minimum execution time: 118_272_000 picoseconds. - Weight::from_parts(121_646_000, 8799) + // Minimum execution time: 102_367_000 picoseconds. 
+ Weight::from_parts(104_352_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1012` + // Measured: `1045` // Estimated: `6196` - // Minimum execution time: 75_717_000 picoseconds. - Weight::from_parts(77_837_000, 6196) + // Minimum execution time: 69_051_000 picoseconds. 
+ Weight::from_parts(71_297_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Bounties Bounties (r:1 w:0) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBounties (r:1 w:1) - /// Proof: ChildBounties ChildBounties (max_values: None, max_size: Some(145), added: 2620, mode: MaxEncodedLen) - /// Storage: System Account (r:3 w:3) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) - /// Proof: ChildBounties ChildrenCuratorFees (max_values: None, max_size: Some(28), added: 2503, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:1) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) - /// Proof: ChildBounties ChildBountyDescriptions (max_values: None, max_size: Some(314), added: 2789, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1199` + // Measured: `1232` // Estimated: `8799` - // Minimum execution time: 94_215_000 picoseconds. - Weight::from_parts(97_017_000, 8799) + // Minimum execution time: 84_053_000 picoseconds. 
+ Weight::from_parts(86_072_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index aae17b7ffc2..ac5797c638c 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -29,7 +29,7 @@ use sp_core::H256; use sp_runtime::{testing::Header, traits::BlakeTwo256, BuildStorage}; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test diff --git a/substrate/frame/collective/src/weights.rs b/substrate/frame/collective/src/weights.rs index eece6a006b8..85744b4de9d 100644 --- a/substrate/frame/collective/src/weights.rs +++ b/substrate/frame/collective/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_collective +//! Autogenerated weights for `pallet_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/collective/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_collective. +/// Weight functions needed for `pallet_collective`. pub trait WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight; fn execute(b: u32, m: u32, ) -> Weight; @@ -64,30 +63,30 @@ pub trait WeightInfo { fn disapprove_proposal(p: u32, ) -> Weight; } -/// Weights for pallet_collective using the Substrate node and recommended hardware. +/// Weights for `pallet_collective` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:100 w:100) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[0, 100]`. /// The range of component `n` is `[0, 100]`. /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 Β±0) + p * (3190 Β±0)` - // Estimated: `15861 + m * (1967 Β±24) + p * (4332 Β±24)` - // Minimum execution time: 17_506_000 picoseconds. - Weight::from_parts(17_767_000, 15861) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 Β±23) + p * (4332 Β±23)` + // Minimum execution time: 15_559_000 picoseconds. + Weight::from_parts(15_870_000, 15894) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(4_117_753, 0).saturating_mul(m.into())) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(7_677_627, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -95,245 +94,261 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 Β±0)` - // Estimated: `1688 + m * (32 Β±0)` - // Minimum execution time: 16_203_000 picoseconds. 
- Weight::from_parts(15_348_267, 1688) - // Standard Error: 37 - .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) - // Standard Error: 382 - .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `380 + m * (32 Β±0)` + // Estimated: `3997 + m * (32 Β±0)` + // Minimum execution time: 19_024_000 picoseconds. + Weight::from_parts(18_153_872, 3997) + // Standard Error: 46 + .saturating_add(Weight::from_parts(1_933, 0).saturating_mul(b.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(18_872, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:0) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 Β±0)` - // Estimated: `3668 + m * (32 Β±0)` - // Minimum execution time: 18_642_000 picoseconds. - Weight::from_parts(17_708_609, 3668) - // Standard Error: 58 - .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) - // Standard Error: 598 - .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `380 + m * (32 Β±0)` + // Estimated: `3997 + m * (32 Β±0)` + // Minimum execution time: 20_895_000 picoseconds. 
+ Weight::from_parts(20_081_761, 3997) + // Standard Error: 57 + .saturating_add(Weight::from_parts(1_982, 0).saturating_mul(b.into())) + // Standard Error: 594 + .saturating_add(Weight::from_parts(32_085, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `492 + m * (32 Β±0) + p * (36 Β±0)` - // Estimated: `3884 + m * (33 Β±0) + p * (36 Β±0)` - // Minimum execution time: 27_067_000 picoseconds. - Weight::from_parts(25_456_964, 3884) - // Standard Error: 112 - .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) - // Standard Error: 1_177 - .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) + // Measured: `525 + m * (32 Β±0) + p * (36 Β±0)` + // Estimated: `3917 + m * (33 Β±0) + p * (36 Β±0)` + // Minimum execution time: 22_068_000 picoseconds. 
+ Weight::from_parts(19_639_088, 3917) + // Standard Error: 104 + .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(b.into())) + // Standard Error: 1_095 + .saturating_add(Weight::from_parts(31_634, 0).saturating_mul(m.into())) + // Standard Error: 1_081 + .saturating_add(Weight::from_parts(178_702, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `941 + m * (64 Β±0)` - // Estimated: `4405 + m * (64 Β±0)` - // Minimum execution time: 26_055_000 picoseconds. - Weight::from_parts(27_251_907, 4405) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) + // Measured: `974 + m * (64 Β±0)` + // Estimated: `4438 + m * (64 Β±0)` + // Minimum execution time: 22_395_000 picoseconds. + Weight::from_parts(23_025_217, 4438) + // Standard Error: 842 + .saturating_add(Weight::from_parts(58_102, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `530 + m * (64 Β±0) + p * (36 Β±0)` - // Estimated: `3975 + m * (65 Β±0) + p * (36 Β±0)` - // Minimum execution time: 28_363_000 picoseconds. 
- Weight::from_parts(28_733_464, 3975) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) - // Standard Error: 1_244 - .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) + // Measured: `563 + m * (64 Β±0) + p * (36 Β±0)` + // Estimated: `4008 + m * (65 Β±0) + p * (36 Β±0)` + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(23_846_394, 4008) + // Standard Error: 1_052 + .saturating_add(Weight::from_parts(40_418, 0).saturating_mul(m.into())) + // Standard Error: 1_026 + .saturating_add(Weight::from_parts(171_653, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `832 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` - // Estimated: `4149 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` - // Minimum execution time: 40_391_000 picoseconds. - Weight::from_parts(42_695_215, 4149) - // Standard Error: 167 - .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) - // Standard Error: 1_772 - .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `1010 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` + // Estimated: `4327 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` + // Minimum execution time: 42_129_000 picoseconds. 
+ Weight::from_parts(40_808_957, 4327) + // Standard Error: 134 + .saturating_add(Weight::from_parts(3_579, 0).saturating_mul(b.into())) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(37_166, 0).saturating_mul(m.into())) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(200_986, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `550 + m * (64 Β±0) + p * (36 Β±0)` - // Estimated: `3995 + m * (65 Β±0) + p * (36 Β±0)` - // Minimum execution time: 31_368_000 picoseconds. - Weight::from_parts(32_141_835, 3995) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) + // Measured: `583 + m * (64 Β±0) + p * (36 Β±0)` + // Estimated: `4028 + m * (65 Β±0) + p * (36 Β±0)` + // Minimum execution time: 26_385_000 picoseconds. 
+ Weight::from_parts(25_713_839, 4028) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_206, 0).saturating_mul(m.into())) + // Standard Error: 1_223 + .saturating_add(Weight::from_parts(195_114, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` - // Estimated: `4169 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` - // Minimum execution time: 43_271_000 picoseconds. - Weight::from_parts(45_495_648, 4169) - // Standard Error: 174 - .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) - // Standard Error: 1_840 - .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) - // Standard Error: 1_793 - .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `1030 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` + // Estimated: `4347 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` + // Minimum execution time: 42_903_000 picoseconds. 
+ Weight::from_parts(43_152_907, 4347) + // Standard Error: 146 + .saturating_add(Weight::from_parts(3_459, 0).saturating_mul(b.into())) + // Standard Error: 1_548 + .saturating_add(Weight::from_parts(35_321, 0).saturating_mul(m.into())) + // Standard Error: 1_509 + .saturating_add(Weight::from_parts(202_541, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + p * (32 Β±0)` - // Estimated: `1844 + p * (32 Β±0)` - // Minimum execution time: 15_170_000 picoseconds. - Weight::from_parts(17_567_243, 1844) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 Β±0)` + // Estimated: `1877 + p * (32 Β±0)` + // Minimum execution time: 12_656_000 picoseconds. + Weight::from_parts(14_032_951, 1877) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(159_143, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:100 w:100) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[0, 100]`. 
/// The range of component `n` is `[0, 100]`. /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 Β±0) + p * (3190 Β±0)` - // Estimated: `15861 + m * (1967 Β±24) + p * (4332 Β±24)` - // Minimum execution time: 17_506_000 picoseconds. - Weight::from_parts(17_767_000, 15861) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(4_374_805, 0).saturating_mul(m.into())) - // Standard Error: 60_220 - .saturating_add(Weight::from_parts(8_398_316, 0).saturating_mul(p.into())) + // Estimated: `15894 + m * (1967 Β±23) + p * (4332 Β±23)` + // Minimum execution time: 15_559_000 picoseconds. + Weight::from_parts(15_870_000, 15894) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(4_117_753, 0).saturating_mul(m.into())) + // Standard Error: 54_310 + .saturating_add(Weight::from_parts(7_677_627, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -341,216 +356,232 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 Β±0)` - // Estimated: `1688 + m * (32 Β±0)` - // Minimum execution time: 16_203_000 picoseconds. - Weight::from_parts(15_348_267, 1688) - // Standard Error: 37 - .saturating_add(Weight::from_parts(1_766, 0).saturating_mul(b.into())) - // Standard Error: 382 - .saturating_add(Weight::from_parts(15_765, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `380 + m * (32 Β±0)` + // Estimated: `3997 + m * (32 Β±0)` + // Minimum execution time: 19_024_000 picoseconds. 
+ Weight::from_parts(18_153_872, 3997) + // Standard Error: 46 + .saturating_add(Weight::from_parts(1_933, 0).saturating_mul(b.into())) + // Standard Error: 478 + .saturating_add(Weight::from_parts(18_872, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:0) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `202 + m * (32 Β±0)` - // Estimated: `3668 + m * (32 Β±0)` - // Minimum execution time: 18_642_000 picoseconds. - Weight::from_parts(17_708_609, 3668) - // Standard Error: 58 - .saturating_add(Weight::from_parts(2_285, 0).saturating_mul(b.into())) - // Standard Error: 598 - .saturating_add(Weight::from_parts(30_454, 0).saturating_mul(m.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `380 + m * (32 Β±0)` + // Estimated: `3997 + m * (32 Β±0)` + // Minimum execution time: 20_895_000 picoseconds. 
+ Weight::from_parts(20_081_761, 3997) + // Standard Error: 57 + .saturating_add(Weight::from_parts(1_982, 0).saturating_mul(b.into())) + // Standard Error: 594 + .saturating_add(Weight::from_parts(32_085, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `492 + m * (32 Β±0) + p * (36 Β±0)` - // Estimated: `3884 + m * (33 Β±0) + p * (36 Β±0)` - // Minimum execution time: 27_067_000 picoseconds. - Weight::from_parts(25_456_964, 3884) - // Standard Error: 112 - .saturating_add(Weight::from_parts(3_773, 0).saturating_mul(b.into())) - // Standard Error: 1_177 - .saturating_add(Weight::from_parts(32_783, 0).saturating_mul(m.into())) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(194_388, 0).saturating_mul(p.into())) + // Measured: `525 + m * (32 Β±0) + p * (36 Β±0)` + // Estimated: `3917 + m * (33 Β±0) + p * (36 Β±0)` + // Minimum execution time: 22_068_000 picoseconds. 
+ Weight::from_parts(19_639_088, 3917) + // Standard Error: 104 + .saturating_add(Weight::from_parts(3_896, 0).saturating_mul(b.into())) + // Standard Error: 1_095 + .saturating_add(Weight::from_parts(31_634, 0).saturating_mul(m.into())) + // Standard Error: 1_081 + .saturating_add(Weight::from_parts(178_702, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `941 + m * (64 Β±0)` - // Estimated: `4405 + m * (64 Β±0)` - // Minimum execution time: 26_055_000 picoseconds. - Weight::from_parts(27_251_907, 4405) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(65_947, 0).saturating_mul(m.into())) + // Measured: `974 + m * (64 Β±0)` + // Estimated: `4438 + m * (64 Β±0)` + // Minimum execution time: 22_395_000 picoseconds. + Weight::from_parts(23_025_217, 4438) + // Standard Error: 842 + .saturating_add(Weight::from_parts(58_102, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `530 + m * (64 Β±0) + p * (36 Β±0)` - // Estimated: `3975 + m * (65 Β±0) + p * (36 Β±0)` - // Minimum execution time: 28_363_000 picoseconds. 
- Weight::from_parts(28_733_464, 3975) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(43_236, 0).saturating_mul(m.into())) - // Standard Error: 1_244 - .saturating_add(Weight::from_parts(180_187, 0).saturating_mul(p.into())) + // Measured: `563 + m * (64 Β±0) + p * (36 Β±0)` + // Estimated: `4008 + m * (65 Β±0) + p * (36 Β±0)` + // Minimum execution time: 24_179_000 picoseconds. + Weight::from_parts(23_846_394, 4008) + // Standard Error: 1_052 + .saturating_add(Weight::from_parts(40_418, 0).saturating_mul(m.into())) + // Standard Error: 1_026 + .saturating_add(Weight::from_parts(171_653, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `832 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` - // Estimated: `4149 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` - // Minimum execution time: 40_391_000 picoseconds. - Weight::from_parts(42_695_215, 4149) - // Standard Error: 167 - .saturating_add(Weight::from_parts(3_622, 0).saturating_mul(b.into())) - // Standard Error: 1_772 - .saturating_add(Weight::from_parts(33_830, 0).saturating_mul(m.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(205_374, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `1010 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` + // Estimated: `4327 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` + // Minimum execution time: 42_129_000 picoseconds. 
+ Weight::from_parts(40_808_957, 4327) + // Standard Error: 134 + .saturating_add(Weight::from_parts(3_579, 0).saturating_mul(b.into())) + // Standard Error: 1_425 + .saturating_add(Weight::from_parts(37_166, 0).saturating_mul(m.into())) + // Standard Error: 1_389 + .saturating_add(Weight::from_parts(200_986, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `550 + m * (64 Β±0) + p * (36 Β±0)` - // Estimated: `3995 + m * (65 Β±0) + p * (36 Β±0)` - // Minimum execution time: 31_368_000 picoseconds. - Weight::from_parts(32_141_835, 3995) - // Standard Error: 1_451 - .saturating_add(Weight::from_parts(36_372, 0).saturating_mul(m.into())) - // Standard Error: 1_415 - .saturating_add(Weight::from_parts(210_635, 0).saturating_mul(p.into())) + // Measured: `583 + m * (64 Β±0) + p * (36 Β±0)` + // Estimated: `4028 + m * (65 Β±0) + p * (36 Β±0)` + // Minimum execution time: 26_385_000 picoseconds. 
+ Weight::from_parts(25_713_839, 4028) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(36_206, 0).saturating_mul(m.into())) + // Standard Error: 1_223 + .saturating_add(Weight::from_parts(195_114, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Council::Voting` (r:1 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:0) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `b` is `[2, 1024]`. /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `852 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` - // Estimated: `4169 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` - // Minimum execution time: 43_271_000 picoseconds. - Weight::from_parts(45_495_648, 4169) - // Standard Error: 174 - .saturating_add(Weight::from_parts(3_034, 0).saturating_mul(b.into())) - // Standard Error: 1_840 - .saturating_add(Weight::from_parts(42_209, 0).saturating_mul(m.into())) - // Standard Error: 1_793 - .saturating_add(Weight::from_parts(207_525, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `1030 + b * (1 Β±0) + m * (64 Β±0) + p * (40 Β±0)` + // Estimated: `4347 + b * (1 Β±0) + m * (66 Β±0) + p * (40 Β±0)` + // Minimum execution time: 42_903_000 picoseconds. 
+ Weight::from_parts(43_152_907, 4347) + // Standard Error: 146 + .saturating_add(Weight::from_parts(3_459, 0).saturating_mul(b.into())) + // Standard Error: 1_548 + .saturating_add(Weight::from_parts(35_321, 0).saturating_mul(m.into())) + // Standard Error: 1_509 + .saturating_add(Weight::from_parts(202_541, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:0 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `359 + p * (32 Β±0)` - // Estimated: `1844 + p * (32 Β±0)` - // Minimum execution time: 15_170_000 picoseconds. - Weight::from_parts(17_567_243, 1844) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(169_040, 0).saturating_mul(p.into())) + // Measured: `392 + p * (32 Β±0)` + // Estimated: `1877 + p * (32 Β±0)` + // Minimum execution time: 12_656_000 picoseconds. + Weight::from_parts(14_032_951, 1877) + // Standard Error: 1_025 + .saturating_add(Weight::from_parts(159_143, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 962591290b3..47ca17c7d80 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_contracts` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_contracts +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/contracts/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -141,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_103_000 picoseconds. + Weight::from_parts(2_260_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -152,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 Β±0)` // Estimated: `442 + k * (70 Β±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_173_000 picoseconds. + Weight::from_parts(12_515_000, 442) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -169,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 Β±0)` // Estimated: `6149 + c * (1 Β±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_503_485, 6149) // Standard Error: 1 .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) @@ -185,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_966_000 picoseconds. + Weight::from_parts(17_445_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 Β±0)` // Estimated: `3635 + k * (1 Β±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_643_000 picoseconds. 
+ Weight::from_parts(3_714_000, 3635) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -212,6 +214,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -219,13 +223,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 Β±0)` - // Estimated: `6263 + c * (1 Β±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 Β±0)` + // Estimated: `6266 + c * (1 Β±0)` + // Minimum execution time: 19_891_000 picoseconds. + Weight::from_parts(19_961_608, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -235,8 +239,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_483_000 picoseconds. + Weight::from_parts(13_205_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,13 +249,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 42_443_000 picoseconds. + Weight::from_parts(43_420_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -263,8 +267,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 52_282_000 picoseconds. 
+ Weight::from_parts(54_110_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -274,8 +278,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_539_000 picoseconds. + Weight::from_parts(2_644_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +291,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 9_473_000 picoseconds. + Weight::from_parts(9_907_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -298,8 +302,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 3_532_000 picoseconds. + Weight::from_parts(3_768_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -310,8 +314,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_036_000 picoseconds. + Weight::from_parts(5_208_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -322,13 +326,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 4_881_000 picoseconds. + Weight::from_parts(5_149_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -344,22 +350,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 Β±0)` - // Estimated: `6739 + c * (1 Β±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 Β±0)` + // Estimated: `9217 + c * (1 Β±0)` + // Minimum execution time: 280_047_000 picoseconds. 
+ Weight::from_parts(270_065_882, 9217) + // Standard Error: 86 + .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -377,17 +385,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. - Weight::from_parts(794_812_431, 8737) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_776_071_000 picoseconds. + Weight::from_parts(706_212_213, 8740) // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -396,6 +404,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -407,24 +417,26 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: 
`Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. - Weight::from_parts(374_252_840, 6504) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 1_966_301_000 picoseconds. + Weight::from_parts(376_287_434, 8982) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -439,19 +451,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 197_571_000 picoseconds. + Weight::from_parts(206_612_000, 9244) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -459,13 +473,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. 
- Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 266_006_000 picoseconds. + Weight::from_parts(287_700_788, 6085) + // Standard Error: 67 + .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -473,7 +487,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -482,8 +496,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 42_714_000 picoseconds. + Weight::from_parts(44_713_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -499,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_516_000 picoseconds. + Weight::from_parts(34_823_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -508,6 +522,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -521,13 +537,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 Β±0)` - // Estimated: `6806 + r * (6 Β±0)` - // Minimum execution time: 254_213_000 picoseconds. - Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 Β±0)` + // Estimated: `9284 + r * (6 Β±0)` + // Minimum execution time: 246_563_000 picoseconds. 
+ Weight::from_parts(274_999_814, 9284) + // Standard Error: 628 + .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -535,6 +551,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -548,13 +566,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 Β±0)` - // Estimated: `6826 + r * (2684 Β±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 Β±0)` + // Estimated: `9304 + r * (2684 Β±0)` + // Minimum execution time: 252_236_000 picoseconds. + Weight::from_parts(121_390_081, 9304) + // Standard Error: 6_206 + .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -563,6 +581,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -576,13 +596,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 Β±0)` - // Estimated: `6830 + r * (2688 Β±0)` - // Minimum execution time: 255_187_000 picoseconds. - Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 Β±0)` + // Estimated: `9308 + r * (2688 Β±0)` + // Minimum execution time: 269_427_000 picoseconds. 
+ Weight::from_parts(134_879_389, 9308) + // Standard Error: 6_711 + .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -591,6 +611,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -604,13 +626,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 Β±0)` - // Estimated: `6815 + r * (6 Β±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 Β±0)` + // Estimated: `9293 + r * (6 Β±0)` + // Minimum execution time: 249_382_000 picoseconds. + Weight::from_parts(266_305_110, 9293) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -618,6 +640,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -631,18 +655,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 Β±0)` - // Estimated: `6804 + r * (3 Β±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 Β±0)` + // Estimated: `9282 + r * (3 Β±0)` + // Minimum execution time: 252_145_000 picoseconds. 
+ Weight::from_parts(277_211_668, 9282) + // Standard Error: 371 + .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -656,13 +682,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 Β±0)` - // Estimated: `6693 + r * (3 Β±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 Β±0)` + // Estimated: `9171 + r * (3 Β±0)` + // Minimum execution time: 251_595_000 picoseconds. + Weight::from_parts(268_102_575, 9171) + // Standard Error: 334 + .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -670,6 +696,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -683,13 +711,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 Β±0)` - // Estimated: `6807 + r * (6 Β±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 Β±0)` + // Estimated: `9285 + r * (6 Β±0)` + // Minimum execution time: 251_404_000 picoseconds. 
+ Weight::from_parts(278_747_574, 9285) + // Standard Error: 782 + .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -697,6 +725,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -710,13 +740,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 Β±0)` - // Estimated: `6806 + r * (6 Β±0)` - // Minimum execution time: 258_799_000 picoseconds. - Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 Β±0)` + // Estimated: `9284 + r * (6 Β±0)` + // Minimum execution time: 255_649_000 picoseconds. + Weight::from_parts(276_509_641, 9284) + // Standard Error: 1_262 + .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -724,6 +754,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -737,13 +769,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 Β±0)` - // Estimated: `6931 + r * (6 Β±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 Β±0)` + // Estimated: `9409 + r * (6 Β±0)` + // Minimum execution time: 267_143_000 picoseconds. 
+ Weight::from_parts(298_166_116, 9409) + // Standard Error: 3_765 + .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -751,6 +783,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -764,13 +798,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 Β±0)` - // Estimated: `6823 + r * (6 Β±0)` - // Minimum execution time: 252_325_000 picoseconds. - Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 Β±0)` + // Estimated: `9301 + r * (6 Β±0)` + // Minimum execution time: 250_013_000 picoseconds. + Weight::from_parts(281_469_583, 9301) + // Standard Error: 730 + .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -778,6 +812,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -791,13 +827,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 Β±0)` - // Estimated: `6816 + r * (6 Β±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 Β±0)` + // Estimated: `9294 + r * (6 Β±0)` + // Minimum execution time: 256_219_000 picoseconds. 
+ Weight::from_parts(275_309_266, 9294) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -805,6 +841,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -818,13 +856,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 Β±0)` - // Estimated: `6819 + r * (6 Β±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 Β±0)` + // Estimated: `9297 + r * (6 Β±0)` + // Minimum execution time: 264_644_000 picoseconds. + Weight::from_parts(283_856_744, 9297) + // Standard Error: 623 + .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -832,6 +870,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -845,13 +885,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 Β±0)` - // Estimated: `6804 + r * (6 Β±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 Β±0)` + // Estimated: `9282 + r * (6 Β±0)` + // Minimum execution time: 263_364_000 picoseconds. 
+ Weight::from_parts(281_379_508, 9282) + // Standard Error: 639 + .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -859,6 +899,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -874,13 +916,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 Β±0)` - // Estimated: `6872 + r * (14 Β±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 Β±0)` + // Estimated: `9350 + r * (14 Β±0)` + // Minimum execution time: 269_667_000 picoseconds. + Weight::from_parts(284_662_802, 9350) + // Standard Error: 691 + .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -888,6 +930,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -901,13 +945,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 Β±0)` - // Estimated: `6807 + r * (6 Β±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 Β±0)` + // Estimated: `9285 + r * (6 Β±0)` + // Minimum execution time: 267_018_000 picoseconds. 
+ Weight::from_parts(281_842_630, 9285) + // Standard Error: 453 + .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -915,6 +959,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -928,19 +974,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 258_208_000 picoseconds. + Weight::from_parts(227_686_792, 9287) // Standard Error: 23 .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -954,11 +1002,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 Β±0)` - // Estimated: `6793 + r * (45 Β±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 Β±0)` + // Estimated: `9271 + r * (45 Β±0)` + // Minimum execution time: 245_714_000 picoseconds. 
+ Weight::from_parts(272_527_059, 9271) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -966,6 +1014,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -979,19 +1029,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 256_060_000 picoseconds. + Weight::from_parts(276_710_811, 9288) // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) @@ -1005,28 +1057,30 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 Β±0)` - // Estimated: `8912 + r * (5266 Β±0)` - // Minimum execution time: 265_888_000 picoseconds. 
- Weight::from_parts(291_232_232, 8912) - // Standard Error: 845_475 - .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(r.into()))) + // Measured: `2902 + r * (529 Β±0)` + // Estimated: `11317 + r * (5479 Β±0)` + // Minimum execution time: 270_479_000 picoseconds. + Weight::from_parts(296_706_989, 11317) + // Standard Error: 813_407 + .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 5479).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1042,13 +1096,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `944 + r * (10 Β±0)` - // Estimated: `6885 + r * (10 Β±0)` - // Minimum execution time: 248_500_000 picoseconds. - Weight::from_parts(282_353_053, 6885) - // Standard Error: 1_144 - .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `947 + r * (10 Β±0)` + // Estimated: `9363 + r * (10 Β±0)` + // Minimum execution time: 251_787_000 picoseconds. + Weight::from_parts(284_036_313, 9363) + // Standard Error: 2_560 + .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -1056,6 +1110,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1069,13 +1125,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. 
fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (10 Β±0)` - // Estimated: `6805 + r * (10 Β±0)` - // Minimum execution time: 248_130_000 picoseconds. - Weight::from_parts(279_583_178, 6805) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `866 + r * (10 Β±0)` + // Estimated: `9283 + r * (10 Β±0)` + // Minimum execution time: 250_566_000 picoseconds. + Weight::from_parts(275_402_784, 9283) + // Standard Error: 3_939 + .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -1083,6 +1139,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1097,15 +1155,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + t * (32 Β±0)` - // Estimated: `6825 + t * (2508 Β±0)` - // Minimum execution time: 258_594_000 picoseconds. - Weight::from_parts(276_734_422, 6825) - // Standard Error: 102_093 - .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into())) + // Measured: `883 + t * (32 Β±0)` + // Estimated: `9303 + t * (2508 Β±0)` + // Minimum execution time: 267_544_000 picoseconds. 
+ Weight::from_parts(280_117_825, 9303) + // Standard Error: 102_111 + .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) // Standard Error: 28 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1115,6 +1173,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1128,13 +1188,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 Β±0)` - // Estimated: `6807 + r * (7 Β±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 Β±0)` + // Estimated: `9285 + r * (7 Β±0)` + // Minimum execution time: 157_769_000 picoseconds. + Weight::from_parts(173_026_565, 9285) + // Standard Error: 405 + .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -1142,6 +1202,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1155,13 +1217,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 403_961_000 picoseconds. 
+ Weight::from_parts(376_480_872, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1169,13 +1231,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 Β±0)` - // Estimated: `926 + r * (293 Β±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 Β±0)` + // Estimated: `929 + r * (293 Β±0)` + // Minimum execution time: 256_272_000 picoseconds. + Weight::from_parts(181_058_379, 929) + // Standard Error: 9_838 + .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1186,13 +1248,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 268_713_000 picoseconds. + Weight::from_parts(328_329_131, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1200,13 +1262,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 Β±0)` - // Estimated: `1253 + n * (1 Β±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 Β±0)` + // Estimated: `1256 + n * (1 Β±0)` + // Minimum execution time: 265_683_000 picoseconds. + Weight::from_parts(293_784_477, 1256) + // Standard Error: 31 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1215,13 +1277,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. 
fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 Β±0)` - // Estimated: `927 + r * (289 Β±0)` - // Minimum execution time: 247_207_000 picoseconds. - Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 Β±0)` + // Estimated: `930 + r * (289 Β±0)` + // Minimum execution time: 269_959_000 picoseconds. + Weight::from_parts(186_065_911, 930) + // Standard Error: 9_679 + .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1232,13 +1294,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 Β±0)` - // Estimated: `1249 + n * (1 Β±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 Β±0)` + // Estimated: `1252 + n * (1 Β±0)` + // Minimum execution time: 265_805_000 picoseconds. + Weight::from_parts(294_633_695, 1252) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1247,13 +1307,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 Β±0)` - // Estimated: `923 + r * (297 Β±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 Β±0)` + // Estimated: `926 + r * (297 Β±0)` + // Minimum execution time: 264_592_000 picoseconds. + Weight::from_parts(204_670_461, 926) + // Standard Error: 6_869 + .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -1263,13 +1323,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 Β±0)` - // Estimated: `1265 + n * (1 Β±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 Β±0)` + // Estimated: `1268 + n * (1 Β±0)` + // Minimum execution time: 273_165_000 picoseconds. 
+ Weight::from_parts(297_883_669, 1268) + // Standard Error: 37 + .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1278,13 +1338,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 Β±0)` - // Estimated: `929 + r * (289 Β±0)` - // Minimum execution time: 252_410_000 picoseconds. - Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 Β±0)` + // Estimated: `932 + r * (289 Β±0)` + // Minimum execution time: 252_257_000 picoseconds. + Weight::from_parts(205_018_595, 932) + // Standard Error: 7_605 + .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -1294,13 +1354,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 Β±0)` - // Estimated: `1252 + n * (1 Β±0)` - // Minimum execution time: 259_053_000 picoseconds. - Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 Β±0)` + // Estimated: `1255 + n * (1 Β±0)` + // Minimum execution time: 266_839_000 picoseconds. + Weight::from_parts(292_064_487, 1255) + // Standard Error: 34 + .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1309,13 +1369,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 Β±0)` - // Estimated: `919 + r * (297 Β±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 Β±0)` + // Estimated: `922 + r * (297 Β±0)` + // Minimum execution time: 266_072_000 picoseconds. + Weight::from_parts(189_018_352, 922) + // Standard Error: 9_530 + .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1326,13 +1386,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 16384]`. 
fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 Β±0)` - // Estimated: `1266 + n * (1 Β±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 Β±0)` + // Estimated: `1269 + n * (1 Β±0)` + // Minimum execution time: 270_703_000 picoseconds. + Weight::from_parts(292_867_105, 1269) + // Standard Error: 30 + .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1340,6 +1400,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1353,13 +1415,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 Β±0)` - // Estimated: `7307 + r * (2520 Β±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 Β±0)` + // Estimated: `9785 + r * (2520 Β±0)` + // Minimum execution time: 251_843_000 picoseconds. + Weight::from_parts(181_026_888, 9785) + // Standard Error: 38_221 + .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1369,6 +1431,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1382,13 +1446,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 800]`. 
fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 Β±0)` - // Estimated: `9440 + r * (2721 Β±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 Β±0)` + // Estimated: `9635 + r * (2721 Β±0)` + // Minimum execution time: 254_881_000 picoseconds. + Weight::from_parts(272_871_000, 9635) + // Standard Error: 94_527 + .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1398,6 +1462,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -1412,12 +1478,12 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 Β±0)` - // Estimated: `6812 + r * (2637 Β±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 Β±3)` + // Minimum execution time: 266_704_000 picoseconds. + Weight::from_parts(273_165_000, 9290) + // Standard Error: 141_486 + .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1427,6 +1493,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -1441,15 +1509,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1048576]`. 
fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 Β±0)` - // Estimated: `12197 + t * (5227 Β±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 Β±0)` + // Estimated: `12200 + t * (5227 Β±0)` + // Minimum execution time: 446_451_000 picoseconds. + Weight::from_parts(68_175_222, 12200) + // Standard Error: 11_619_250 + .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -1459,6 +1527,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -1472,17 +1542,17 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 Β±0)` - // Estimated: `9620 + r * (2731 Β±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 Β±0)` + // Estimated: `9623 + r * (2731 Β±0)` + // Minimum execution time: 628_156_000 picoseconds. 
+ Weight::from_parts(642_052_000, 9623) + // Standard Error: 223_957 + .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -1492,6 +1562,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -1505,23 +1577,23 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 Β±0)` - // Estimated: `12211 + t * (2549 Β±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 Β±0)` + // Estimated: `12214 + t * (2549 Β±1)` + // Minimum execution time: 2_106_379_000 picoseconds. 
+ Weight::from_parts(1_098_227_098, 12214) + // Standard Error: 12_549_729 + .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1531,6 +1603,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1544,13 +1618,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 Β±0)` - // Estimated: `6801 + r * (8 Β±0)` - // Minimum execution time: 241_609_000 picoseconds. - Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 Β±0)` + // Estimated: `9279 + r * (8 Β±0)` + // Minimum execution time: 257_102_000 picoseconds. + Weight::from_parts(278_920_692, 9279) + // Standard Error: 464 + .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1558,6 +1632,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1571,19 +1647,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 266_609_000 picoseconds. 
+ Weight::from_parts(263_034_132, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1597,13 +1675,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6806 + r * (8 Β±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9284 + r * (8 Β±0)` + // Minimum execution time: 246_275_000 picoseconds. + Weight::from_parts(279_091_594, 9284) + // Standard Error: 887 + .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1611,6 +1689,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1624,19 +1704,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. - Weight::from_parts(265_542_351, 6814) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 249_254_000 picoseconds. 
+ Weight::from_parts(270_833_823, 9292) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1650,13 +1732,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6808 + r * (8 Β±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9286 + r * (8 Β±0)` + // Minimum execution time: 259_131_000 picoseconds. + Weight::from_parts(272_665_020, 9286) + // Standard Error: 862 + .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1664,6 +1746,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1677,19 +1761,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 276_465_000 picoseconds. 
+ Weight::from_parts(269_450_297, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1703,13 +1789,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6805 + r * (8 Β±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9283 + r * (8 Β±0)` + // Minimum execution time: 248_349_000 picoseconds. + Weight::from_parts(273_660_686, 9283) + // Standard Error: 656 + .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -1717,6 +1803,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1730,19 +1818,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 252_107_000 picoseconds. 
+ Weight::from_parts(267_427_726, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1756,13 +1846,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 Β±0)` - // Estimated: `6934 + n * (1 Β±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 Β±0)` + // Estimated: `9412 + n * (1 Β±0)` + // Minimum execution time: 311_252_000 picoseconds. + Weight::from_parts(333_250_773, 9412) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1770,6 +1860,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1783,13 +1875,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 Β±0)` - // Estimated: `6748 + r * (112 Β±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `810 + r * (112 Β±0)` + // Estimated: `9226 + r * (112 Β±0)` + // Minimum execution time: 267_431_000 picoseconds. 
+ Weight::from_parts(300_733_048, 9226) + // Standard Error: 8_806 + .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -1797,6 +1889,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1810,13 +1904,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 Β±0)` - // Estimated: `6802 + r * (77 Β±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 Β±0)` + // Estimated: `9279 + r * (77 Β±0)` + // Minimum execution time: 265_905_000 picoseconds. + Weight::from_parts(305_381_951, 9279) + // Standard Error: 9_863 + .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -1824,6 +1918,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1837,13 +1933,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 Β±0)` - // Estimated: `6816 + r * (42 Β±0)` - // Minimum execution time: 248_825_000 picoseconds. - Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 Β±0)` + // Estimated: `9294 + r * (42 Β±0)` + // Minimum execution time: 266_701_000 picoseconds. 
+ Weight::from_parts(292_407_888, 9294) + // Standard Error: 5_394 + .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -1851,6 +1947,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -1865,12 +1963,12 @@ impl WeightInfo for SubstrateWeight { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 Β±0)` - // Estimated: `6807 + r * (3090 Β±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 Β±7)` + // Minimum execution time: 268_755_000 picoseconds. + Weight::from_parts(272_300_000, 9285) + // Standard Error: 43_747 + .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1880,6 +1978,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1893,22 +1993,24 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 32]`. fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `928 + r * (131 Β±0)` - // Estimated: `6878 + r * (2606 Β±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `937 + r * (131 Β±0)` + // Estimated: `9346 + r * (2607 Β±0)` + // Minimum execution time: 255_190_000 picoseconds. 
+ Weight::from_parts(285_643_922, 9346) + // Standard Error: 23_582 + .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -1922,13 +2024,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 Β±0)` + // Measured: `972 + r * (184 Β±0)` // Estimated: `129453 + r * (2568 Β±0)` - // Minimum execution time: 254_472_000 picoseconds. - Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 260_061_000 picoseconds. + Weight::from_parts(286_849_343, 129453) + // Standard Error: 22_406 + .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -1938,6 +2040,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1951,13 +2055,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 Β±0)` - // Estimated: `6804 + r * (3 Β±0)` - // Minimum execution time: 250_535_000 picoseconds. 
- Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 Β±0)` + // Estimated: `9282 + r * (3 Β±0)` + // Minimum execution time: 250_922_000 picoseconds. + Weight::from_parts(277_294_913, 9282) + // Standard Error: 391 + .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -1965,6 +2069,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1978,13 +2084,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 Β±0)` - // Estimated: `7899 + r * (40 Β±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 Β±0)` + // Estimated: `10377 + r * (40 Β±0)` + // Minimum execution time: 267_293_000 picoseconds. + Weight::from_parts(309_125_635, 10377) + // Standard Error: 637 + .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -1992,6 +2098,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2007,13 +2115,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 Β±0)` - // Estimated: `6801 + r * (3 Β±0)` - // Minimum execution time: 246_540_000 picoseconds. 
- Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 Β±0)` + // Estimated: `9279 + r * (3 Β±0)` + // Minimum execution time: 256_057_000 picoseconds. + Weight::from_parts(278_068_870, 9279) + // Standard Error: 335 + .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2022,10 +2130,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_846_000 picoseconds. + Weight::from_parts(1_808_029, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) } } @@ -2037,8 +2145,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_997_000 picoseconds. - Weight::from_parts(2_130_000, 1627) + // Minimum execution time: 2_103_000 picoseconds. + Weight::from_parts(2_260_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -2048,10 +2156,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 Β±0)` // Estimated: `442 + k * (70 Β±0)` - // Minimum execution time: 12_276_000 picoseconds. - Weight::from_parts(1_593_881, 442) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(1_109_302, 0).saturating_mul(k.into())) + // Minimum execution time: 12_173_000 picoseconds. + Weight::from_parts(12_515_000, 442) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2065,8 +2173,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 Β±0)` // Estimated: `6149 + c * (1 Β±0)` - // Minimum execution time: 8_176_000 picoseconds. - Weight::from_parts(8_555_388, 6149) + // Minimum execution time: 8_054_000 picoseconds. + Weight::from_parts(8_503_485, 6149) // Standard Error: 1 .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) @@ -2081,8 +2189,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_270_000 picoseconds. - Weight::from_parts(16_779_000, 6450) + // Minimum execution time: 16_966_000 picoseconds. + Weight::from_parts(17_445_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2095,10 +2203,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 Β±0)` // Estimated: `3635 + k * (1 Β±0)` - // Minimum execution time: 3_572_000 picoseconds. - Weight::from_parts(1_950_905, 3635) - // Standard Error: 1_597 - .saturating_add(Weight::from_parts(1_123_190, 0).saturating_mul(k.into())) + // Minimum execution time: 3_643_000 picoseconds. 
+ Weight::from_parts(3_714_000, 3635) + // Standard Error: 1_056 + .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -2108,6 +2216,8 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -2115,13 +2225,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325 + c * (1 Β±0)` - // Estimated: `6263 + c * (1 Β±0)` - // Minimum execution time: 16_873_000 picoseconds. - Weight::from_parts(16_790_402, 6263) + // Measured: `328 + c * (1 Β±0)` + // Estimated: `6266 + c * (1 Β±0)` + // Minimum execution time: 19_891_000 picoseconds. + Weight::from_parts(19_961_608, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(396, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -2131,8 +2241,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_904_000 picoseconds. - Weight::from_parts(12_785_000, 6380) + // Minimum execution time: 12_483_000 picoseconds. + Weight::from_parts(13_205_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2141,13 +2251,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 44_920_000 picoseconds. - Weight::from_parts(46_163_000, 6292) + // Minimum execution time: 42_443_000 picoseconds. + Weight::from_parts(43_420_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2159,8 +2269,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 53_864_000 picoseconds. - Weight::from_parts(55_139_000, 6534) + // Minimum execution time: 52_282_000 picoseconds. 
+ Weight::from_parts(54_110_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2170,8 +2280,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_375_000 picoseconds. - Weight::from_parts(2_487_000, 1627) + // Minimum execution time: 2_539_000 picoseconds. + Weight::from_parts(2_644_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2183,8 +2293,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 11_580_000 picoseconds. - Weight::from_parts(11_980_000, 3631) + // Minimum execution time: 9_473_000 picoseconds. + Weight::from_parts(9_907_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2194,8 +2304,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_557_000 picoseconds. - Weight::from_parts(4_807_000, 3607) + // Minimum execution time: 3_532_000 picoseconds. + Weight::from_parts(3_768_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2206,8 +2316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_253_000 picoseconds. - Weight::from_parts(6_479_000, 3632) + // Minimum execution time: 5_036_000 picoseconds. + Weight::from_parts(5_208_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2218,13 +2328,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_545_000, 3607) + // Minimum execution time: 4_881_000 picoseconds. + Weight::from_parts(5_149_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2240,22 +2352,24 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `801 + c * (1 Β±0)` - // Estimated: `6739 + c * (1 Β±0)` - // Minimum execution time: 282_232_000 picoseconds. - Weight::from_parts(266_148_573, 6739) - // Standard Error: 69 - .saturating_add(Weight::from_parts(34_592, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `804 + c * (1 Β±0)` + // Estimated: `9217 + c * (1 Β±0)` + // Minimum execution time: 280_047_000 picoseconds. 
+ Weight::from_parts(270_065_882, 9217) + // Standard Error: 86 + .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:3 w:3) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) @@ -2273,17 +2387,17 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_760_879_000 picoseconds. - Weight::from_parts(794_812_431, 8737) + // Measured: `326` + // Estimated: `8740` + // Minimum execution time: 3_776_071_000 picoseconds. + Weight::from_parts(706_212_213, 8740) // Standard Error: 149 - .saturating_add(Weight::from_parts(101_881, 0).saturating_mul(c.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_404, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_544, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2292,6 +2406,8 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -2303,24 +2419,26 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: 
None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_953_162_000 picoseconds. - Weight::from_parts(374_252_840, 6504) + // Measured: `563` + // Estimated: `8982` + // Minimum execution time: 1_966_301_000 picoseconds. + Weight::from_parts(376_287_434, 8982) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_630, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) // Standard Error: 7 - .saturating_add(Weight::from_parts(1_650, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2335,19 +2453,21 @@ impl WeightInfo for () { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 187_899_000 picoseconds. - Weight::from_parts(195_510_000, 6766) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `829` + // Estimated: `9244` + // Minimum execution time: 197_571_000 picoseconds. + Weight::from_parts(206_612_000, 9244) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2355,13 +2475,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 254_800_000 picoseconds. 
- Weight::from_parts(285_603_050, 3607) - // Standard Error: 62 - .saturating_add(Weight::from_parts(66_212, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 266_006_000 picoseconds. + Weight::from_parts(287_700_788, 6085) + // Standard Error: 67 + .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -2369,7 +2489,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `System::EventTopics` (r:1 w:1) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) @@ -2378,8 +2498,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 43_553_000 picoseconds. - Weight::from_parts(45_036_000, 3780) + // Minimum execution time: 42_714_000 picoseconds. + Weight::from_parts(44_713_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2395,8 +2515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_223_000 picoseconds. - Weight::from_parts(34_385_000, 8967) + // Minimum execution time: 33_516_000 picoseconds. + Weight::from_parts(34_823_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2404,6 +2524,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2417,13 +2539,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `866 + r * (6 Β±0)` - // Estimated: `6806 + r * (6 Β±0)` - // Minimum execution time: 254_213_000 picoseconds. - Weight::from_parts(273_464_980, 6806) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(322_619, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `869 + r * (6 Β±0)` + // Estimated: `9284 + r * (6 Β±0)` + // Minimum execution time: 246_563_000 picoseconds. 
+ Weight::from_parts(274_999_814, 9284) + // Standard Error: 628 + .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2431,6 +2553,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2444,13 +2568,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_is_contract(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `922 + r * (209 Β±0)` - // Estimated: `6826 + r * (2684 Β±0)` - // Minimum execution time: 250_273_000 picoseconds. - Weight::from_parts(122_072_782, 6826) - // Standard Error: 5_629 - .saturating_add(Weight::from_parts(3_490_256, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `925 + r * (209 Β±0)` + // Estimated: `9304 + r * (2684 Β±0)` + // Minimum execution time: 252_236_000 picoseconds. + Weight::from_parts(121_390_081, 9304) + // Standard Error: 6_206 + .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2684).saturating_mul(r.into())) @@ -2459,6 +2583,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1601 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2472,13 +2598,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (213 Β±0)` - // Estimated: `6830 + r * (2688 Β±0)` - // Minimum execution time: 255_187_000 picoseconds. - Weight::from_parts(118_082_505, 6830) - // Standard Error: 6_302 - .saturating_add(Weight::from_parts(4_246_968, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (213 Β±0)` + // Estimated: `9308 + r * (2688 Β±0)` + // Minimum execution time: 269_427_000 picoseconds. 
+ Weight::from_parts(134_879_389, 9308) + // Standard Error: 6_711 + .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 2688).saturating_mul(r.into())) @@ -2487,6 +2613,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2500,13 +2628,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_own_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `873 + r * (6 Β±0)` - // Estimated: `6815 + r * (6 Β±0)` - // Minimum execution time: 256_833_000 picoseconds. - Weight::from_parts(273_330_216, 6815) - // Standard Error: 881 - .saturating_add(Weight::from_parts(400_105, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `876 + r * (6 Β±0)` + // Estimated: `9293 + r * (6 Β±0)` + // Minimum execution time: 249_382_000 picoseconds. + Weight::from_parts(266_305_110, 9293) + // Standard Error: 1_592 + .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2514,6 +2642,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2527,18 +2657,20 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (3 Β±0)` - // Estimated: `6804 + r * (3 Β±0)` - // Minimum execution time: 244_193_000 picoseconds. - Weight::from_parts(271_221_908, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(176_480, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (3 Β±0)` + // Estimated: `9282 + r * (3 Β±0)` + // Minimum execution time: 252_145_000 picoseconds. 
+ Weight::from_parts(277_211_668, 9282) + // Standard Error: 371 + .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2552,13 +2684,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `753 + r * (3 Β±0)` - // Estimated: `6693 + r * (3 Β±0)` - // Minimum execution time: 232_603_000 picoseconds. - Weight::from_parts(260_577_368, 6693) - // Standard Error: 365 - .saturating_add(Weight::from_parts(158_126, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `756 + r * (3 Β±0)` + // Estimated: `9171 + r * (3 Β±0)` + // Minimum execution time: 251_595_000 picoseconds. + Weight::from_parts(268_102_575, 9171) + // Standard Error: 334 + .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -2566,6 +2698,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2579,13 +2713,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `867 + r * (6 Β±0)` - // Estimated: `6807 + r * (6 Β±0)` - // Minimum execution time: 247_564_000 picoseconds. - Weight::from_parts(275_108_914, 6807) - // Standard Error: 505 - .saturating_add(Weight::from_parts(315_065, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `870 + r * (6 Β±0)` + // Estimated: `9285 + r * (6 Β±0)` + // Minimum execution time: 251_404_000 picoseconds. 
+ Weight::from_parts(278_747_574, 9285) + // Standard Error: 782 + .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2593,6 +2727,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2606,13 +2742,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 Β±0)` - // Estimated: `6806 + r * (6 Β±0)` - // Minimum execution time: 258_799_000 picoseconds. - Weight::from_parts(274_338_256, 6806) - // Standard Error: 632 - .saturating_add(Weight::from_parts(355_032, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 Β±0)` + // Estimated: `9284 + r * (6 Β±0)` + // Minimum execution time: 255_649_000 picoseconds. + Weight::from_parts(276_509_641, 9284) + // Standard Error: 1_262 + .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2620,6 +2756,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:2 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2633,13 +2771,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1007 + r * (6 Β±0)` - // Estimated: `6931 + r * (6 Β±0)` - // Minimum execution time: 253_335_000 picoseconds. - Weight::from_parts(273_013_859, 6931) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_540_735, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1010 + r * (6 Β±0)` + // Estimated: `9409 + r * (6 Β±0)` + // Minimum execution time: 267_143_000 picoseconds. 
+ Weight::from_parts(298_166_116, 9409) + // Standard Error: 3_765 + .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2647,6 +2785,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2660,13 +2800,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_value_transferred(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (6 Β±0)` - // Estimated: `6823 + r * (6 Β±0)` - // Minimum execution time: 252_325_000 picoseconds. - Weight::from_parts(274_733_944, 6823) - // Standard Error: 603 - .saturating_add(Weight::from_parts(314_467, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (6 Β±0)` + // Estimated: `9301 + r * (6 Β±0)` + // Minimum execution time: 250_013_000 picoseconds. + Weight::from_parts(281_469_583, 9301) + // Standard Error: 730 + .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2674,6 +2814,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2687,13 +2829,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `875 + r * (6 Β±0)` - // Estimated: `6816 + r * (6 Β±0)` - // Minimum execution time: 250_698_000 picoseconds. - Weight::from_parts(271_707_578, 6816) - // Standard Error: 952 - .saturating_add(Weight::from_parts(318_412, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `878 + r * (6 Β±0)` + // Estimated: `9294 + r * (6 Β±0)` + // Minimum execution time: 256_219_000 picoseconds. 
+ Weight::from_parts(275_309_266, 9294) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2701,6 +2843,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2714,13 +2858,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872 + r * (6 Β±0)` - // Estimated: `6819 + r * (6 Β±0)` - // Minimum execution time: 251_854_000 picoseconds. - Weight::from_parts(272_002_212, 6819) - // Standard Error: 622 - .saturating_add(Weight::from_parts(313_353, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875 + r * (6 Β±0)` + // Estimated: `9297 + r * (6 Β±0)` + // Minimum execution time: 264_644_000 picoseconds. + Weight::from_parts(283_856_744, 9297) + // Standard Error: 623 + .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2728,6 +2872,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2741,13 +2887,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (6 Β±0)` - // Estimated: `6804 + r * (6 Β±0)` - // Minimum execution time: 252_010_000 picoseconds. - Weight::from_parts(270_387_000, 6804) - // Standard Error: 659 - .saturating_add(Weight::from_parts(325_856, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (6 Β±0)` + // Estimated: `9282 + r * (6 Β±0)` + // Minimum execution time: 263_364_000 picoseconds. 
+ Weight::from_parts(281_379_508, 9282) + // Standard Error: 639 + .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2755,6 +2901,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2770,13 +2918,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_weight_to_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `937 + r * (14 Β±0)` - // Estimated: `6872 + r * (14 Β±0)` - // Minimum execution time: 247_933_000 picoseconds. - Weight::from_parts(281_550_162, 6872) - // Standard Error: 660 - .saturating_add(Weight::from_parts(1_090_869, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `940 + r * (14 Β±0)` + // Estimated: `9350 + r * (14 Β±0)` + // Minimum execution time: 269_667_000 picoseconds. + Weight::from_parts(284_662_802, 9350) + // Standard Error: 691 + .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) } @@ -2784,6 +2932,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2797,13 +2947,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_input(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `865 + r * (6 Β±0)` - // Estimated: `6807 + r * (6 Β±0)` - // Minimum execution time: 251_158_000 picoseconds. - Weight::from_parts(274_623_152, 6807) - // Standard Error: 491 - .saturating_add(Weight::from_parts(263_916, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `868 + r * (6 Β±0)` + // Estimated: `9285 + r * (6 Β±0)` + // Minimum execution time: 267_018_000 picoseconds. 
+ Weight::from_parts(281_842_630, 9285) + // Standard Error: 453 + .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) } @@ -2811,6 +2961,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2824,19 +2976,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 263_205_000 picoseconds. - Weight::from_parts(216_792_893, 6809) + // Measured: `872` + // Estimated: `9287` + // Minimum execution time: 258_208_000 picoseconds. + Weight::from_parts(227_686_792, 9287) // Standard Error: 23 .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2850,11 +3004,11 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `853 + r * (45 Β±0)` - // Estimated: `6793 + r * (45 Β±0)` - // Minimum execution time: 239_663_000 picoseconds. - Weight::from_parts(266_124_565, 6793) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `856 + r * (45 Β±0)` + // Estimated: `9271 + r * (45 Β±0)` + // Minimum execution time: 245_714_000 picoseconds. 
+ Weight::from_parts(272_527_059, 9271) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) } @@ -2862,6 +3016,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2875,19 +3031,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863` - // Estimated: `6810` - // Minimum execution time: 241_763_000 picoseconds. - Weight::from_parts(266_535_552, 6810) + // Measured: `866` + // Estimated: `9288` + // Minimum execution time: 256_060_000 picoseconds. + Weight::from_parts(276_710_811, 9288) // Standard Error: 0 - .saturating_add(Weight::from_parts(320, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:1 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) @@ -2901,28 +3059,30 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2972 + r * (316 Β±0)` - // Estimated: `8912 + r * (5266 Β±0)` - // Minimum execution time: 265_888_000 picoseconds. 
- Weight::from_parts(291_232_232, 8912) - // Standard Error: 845_475 - .saturating_add(Weight::from_parts(104_398_867, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(r.into()))) + // Measured: `2902 + r * (529 Β±0)` + // Estimated: `11317 + r * (5479 Β±0)` + // Minimum execution time: 270_479_000 picoseconds. + Weight::from_parts(296_706_989, 11317) + // Standard Error: 813_407 + .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((10_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 5266).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 5479).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2938,13 +3098,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_random(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `944 + r * (10 Β±0)` - // Estimated: `6885 + r * (10 Β±0)` - // Minimum execution time: 248_500_000 picoseconds. - Weight::from_parts(282_353_053, 6885) - // Standard Error: 1_144 - .saturating_add(Weight::from_parts(1_193_841, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `947 + r * (10 Β±0)` + // Estimated: `9363 + r * (10 Β±0)` + // Minimum execution time: 251_787_000 picoseconds. + Weight::from_parts(284_036_313, 9363) + // Standard Error: 2_560 + .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2952,6 +3112,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2965,13 +3127,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_deposit_event(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `863 + r * (10 Β±0)` - // Estimated: `6805 + r * (10 Β±0)` - // Minimum execution time: 248_130_000 picoseconds. - Weight::from_parts(279_583_178, 6805) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_987_941, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `866 + r * (10 Β±0)` + // Estimated: `9283 + r * (10 Β±0)` + // Minimum execution time: 250_566_000 picoseconds. + Weight::from_parts(275_402_784, 9283) + // Standard Error: 3_939 + .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) } @@ -2979,6 +3141,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2993,15 +3157,15 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `880 + t * (32 Β±0)` - // Estimated: `6825 + t * (2508 Β±0)` - // Minimum execution time: 258_594_000 picoseconds. - Weight::from_parts(276_734_422, 6825) - // Standard Error: 102_093 - .saturating_add(Weight::from_parts(2_559_383, 0).saturating_mul(t.into())) + // Measured: `883 + t * (32 Β±0)` + // Estimated: `9303 + t * (2508 Β±0)` + // Minimum execution time: 267_544_000 picoseconds. + Weight::from_parts(280_117_825, 9303) + // Standard Error: 102_111 + .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) // Standard Error: 28 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3011,6 +3175,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3024,13 +3190,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_debug_message(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (7 Β±0)` - // Estimated: `6807 + r * (7 Β±0)` - // Minimum execution time: 154_564_000 picoseconds. - Weight::from_parts(168_931_365, 6807) - // Standard Error: 349 - .saturating_add(Weight::from_parts(226_848, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (7 Β±0)` + // Estimated: `9285 + r * (7 Β±0)` + // Minimum execution time: 157_769_000 picoseconds. + Weight::from_parts(173_026_565, 9285) + // Standard Error: 405 + .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) } @@ -3038,6 +3204,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3051,13 +3219,13 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `125813` - // Estimated: `131755` - // Minimum execution time: 394_382_000 picoseconds. - Weight::from_parts(376_780_500, 131755) + // Measured: `125816` + // Estimated: `131758` + // Minimum execution time: 403_961_000 picoseconds. + Weight::from_parts(376_480_872, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3065,13 +3233,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_set_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `924 + r * (292 Β±0)` - // Estimated: `926 + r * (293 Β±0)` - // Minimum execution time: 249_757_000 picoseconds. - Weight::from_parts(177_324_374, 926) - // Standard Error: 9_512 - .saturating_add(Weight::from_parts(6_176_717, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `927 + r * (292 Β±0)` + // Estimated: `929 + r * (293 Β±0)` + // Minimum execution time: 256_272_000 picoseconds. + Weight::from_parts(181_058_379, 929) + // Standard Error: 9_838 + .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3082,13 +3250,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. 
fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1447` - // Estimated: `1430` - // Minimum execution time: 267_564_000 picoseconds. - Weight::from_parts(328_701_080, 1430) - // Standard Error: 61 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Measured: `1450` + // Estimated: `1433` + // Minimum execution time: 268_713_000 picoseconds. + Weight::from_parts(328_329_131, 1433) + // Standard Error: 66 + .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -3096,13 +3264,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1253 + n * (1 Β±0)` - // Estimated: `1253 + n * (1 Β±0)` - // Minimum execution time: 266_347_000 picoseconds. - Weight::from_parts(289_824_718, 1253) - // Standard Error: 34 - .saturating_add(Weight::from_parts(184, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1256 + n * (1 Β±0)` + // Estimated: `1256 + n * (1 Β±0)` + // Minimum execution time: 265_683_000 picoseconds. + Weight::from_parts(293_784_477, 1256) + // Standard Error: 31 + .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3111,13 +3279,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_clear_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (288 Β±0)` - // Estimated: `927 + r * (289 Β±0)` - // Minimum execution time: 247_207_000 picoseconds. - Weight::from_parts(179_856_075, 927) - // Standard Error: 9_383 - .saturating_add(Weight::from_parts(6_053_198, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (288 Β±0)` + // Estimated: `930 + r * (289 Β±0)` + // Minimum execution time: 269_959_000 picoseconds. + Weight::from_parts(186_065_911, 930) + // Standard Error: 9_679 + .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3128,13 +3296,11 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_clear_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1249 + n * (1 Β±0)` - // Estimated: `1249 + n * (1 Β±0)` - // Minimum execution time: 262_655_000 picoseconds. - Weight::from_parts(289_482_543, 1249) - // Standard Error: 35 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1252 + n * (1 Β±0)` + // Estimated: `1252 + n * (1 Β±0)` + // Minimum execution time: 265_805_000 picoseconds. 
+ Weight::from_parts(294_633_695, 1252) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3143,13 +3309,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_get_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `921 + r * (296 Β±0)` - // Estimated: `923 + r * (297 Β±0)` - // Minimum execution time: 247_414_000 picoseconds. - Weight::from_parts(203_317_182, 923) - // Standard Error: 7_191 - .saturating_add(Weight::from_parts(4_925_154, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `924 + r * (296 Β±0)` + // Estimated: `926 + r * (297 Β±0)` + // Minimum execution time: 264_592_000 picoseconds. + Weight::from_parts(204_670_461, 926) + // Standard Error: 6_869 + .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 297).saturating_mul(r.into())) @@ -3159,13 +3325,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_get_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1265 + n * (1 Β±0)` - // Estimated: `1265 + n * (1 Β±0)` - // Minimum execution time: 258_910_000 picoseconds. - Weight::from_parts(283_086_514, 1265) - // Standard Error: 39 - .saturating_add(Weight::from_parts(980, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1268 + n * (1 Β±0)` + // Estimated: `1268 + n * (1 Β±0)` + // Minimum execution time: 273_165_000 picoseconds. + Weight::from_parts(297_883_669, 1268) + // Standard Error: 37 + .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3174,13 +3340,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_contains_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `932 + r * (288 Β±0)` - // Estimated: `929 + r * (289 Β±0)` - // Minimum execution time: 252_410_000 picoseconds. - Weight::from_parts(201_227_879, 929) - // Standard Error: 6_899 - .saturating_add(Weight::from_parts(4_774_983, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `935 + r * (288 Β±0)` + // Estimated: `932 + r * (289 Β±0)` + // Minimum execution time: 252_257_000 picoseconds. + Weight::from_parts(205_018_595, 932) + // Standard Error: 7_605 + .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 289).saturating_mul(r.into())) @@ -3190,13 +3356,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_contains_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1252 + n * (1 Β±0)` - // Estimated: `1252 + n * (1 Β±0)` - // Minimum execution time: 259_053_000 picoseconds. 
- Weight::from_parts(283_392_084, 1252) - // Standard Error: 41 - .saturating_add(Weight::from_parts(213, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1255 + n * (1 Β±0)` + // Estimated: `1255 + n * (1 Β±0)` + // Minimum execution time: 266_839_000 picoseconds. + Weight::from_parts(292_064_487, 1255) + // Standard Error: 34 + .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3205,13 +3371,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_take_storage(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `914 + r * (296 Β±0)` - // Estimated: `919 + r * (297 Β±0)` - // Minimum execution time: 251_371_000 picoseconds. - Weight::from_parts(177_119_717, 919) - // Standard Error: 9_421 - .saturating_add(Weight::from_parts(6_226_005, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `917 + r * (296 Β±0)` + // Estimated: `922 + r * (297 Β±0)` + // Minimum execution time: 266_072_000 picoseconds. + Weight::from_parts(189_018_352, 922) + // Standard Error: 9_530 + .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3222,13 +3388,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 16384]`. fn seal_take_storage_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1266 + n * (1 Β±0)` - // Estimated: `1266 + n * (1 Β±0)` - // Minimum execution time: 263_350_000 picoseconds. - Weight::from_parts(284_323_917, 1266) - // Standard Error: 31 - .saturating_add(Weight::from_parts(921, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1269 + n * (1 Β±0)` + // Estimated: `1269 + n * (1 Β±0)` + // Minimum execution time: 270_703_000 picoseconds. + Weight::from_parts(292_867_105, 1269) + // Standard Error: 30 + .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3236,6 +3402,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1602 w:1601) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3249,13 +3417,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. 
fn seal_transfer(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1415 + r * (45 Β±0)` - // Estimated: `7307 + r * (2520 Β±0)` - // Minimum execution time: 248_701_000 picoseconds. - Weight::from_parts(17_811_969, 7307) - // Standard Error: 35_154 - .saturating_add(Weight::from_parts(31_809_738, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1418 + r * (45 Β±0)` + // Estimated: `9785 + r * (2520 Β±0)` + // Minimum execution time: 251_843_000 picoseconds. + Weight::from_parts(181_026_888, 9785) + // Standard Error: 38_221 + .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3265,6 +3433,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3278,13 +3448,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1260 + r * (245 Β±0)` - // Estimated: `9440 + r * (2721 Β±0)` - // Minimum execution time: 247_335_000 picoseconds. - Weight::from_parts(264_025_000, 9440) - // Standard Error: 121_299 - .saturating_add(Weight::from_parts(234_770_827, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1263 + r * (245 Β±0)` + // Estimated: `9635 + r * (2721 Β±0)` + // Minimum execution time: 254_881_000 picoseconds. 
+ Weight::from_parts(272_871_000, 9635) + // Standard Error: 94_527 + .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3294,6 +3464,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:736 w:0) @@ -3308,12 +3480,12 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 Β±0)` - // Estimated: `6812 + r * (2637 Β±3)` - // Minimum execution time: 261_011_000 picoseconds. - Weight::from_parts(264_554_000, 6812) - // Standard Error: 104_415 - .saturating_add(Weight::from_parts(231_627_084, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9290 + r * (2637 Β±3)` + // Minimum execution time: 266_704_000 picoseconds. + Weight::from_parts(273_165_000, 9290) + // Standard Error: 141_486 + .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3323,6 +3495,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:0) @@ -3337,15 +3511,15 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1307 + t * (277 Β±0)` - // Estimated: `12197 + t * (5227 Β±0)` - // Minimum execution time: 445_561_000 picoseconds. - Weight::from_parts(62_287_490, 12197) - // Standard Error: 11_797_697 - .saturating_add(Weight::from_parts(357_530_529, 0).saturating_mul(t.into())) + // Measured: `1310 + t * (277 Β±0)` + // Estimated: `12200 + t * (5227 Β±0)` + // Minimum execution time: 446_451_000 picoseconds. 
+ Weight::from_parts(68_175_222, 12200) + // Standard Error: 11_619_250 + .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(970, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) @@ -3355,6 +3529,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:801 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:801 w:800) @@ -3368,17 +3544,17 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:803 w:803) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:800 w:800) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1278 + r * (255 Β±0)` - // Estimated: `9620 + r * (2731 Β±0)` - // Minimum execution time: 621_897_000 picoseconds. - Weight::from_parts(631_687_000, 9620) - // Standard Error: 215_241 - .saturating_add(Weight::from_parts(350_527_831, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `1281 + r * (255 Β±0)` + // Estimated: `9623 + r * (2731 Β±0)` + // Minimum execution time: 628_156_000 picoseconds. 
+ Weight::from_parts(642_052_000, 9623) + // Standard Error: 223_957 + .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) @@ -3388,6 +3564,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:2 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:1) @@ -3401,23 +3579,23 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1303 + t * (104 Β±0)` - // Estimated: `12211 + t * (2549 Β±1)` - // Minimum execution time: 2_181_184_000 picoseconds. - Weight::from_parts(1_194_190_111, 12211) - // Standard Error: 11_578_766 - .saturating_add(Weight::from_parts(6_361_884, 0).saturating_mul(t.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_025, 0).saturating_mul(i.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_158, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(16_u64)) + // Measured: `1306 + t * (104 Β±0)` + // Estimated: `12214 + t * (2549 Β±1)` + // Minimum execution time: 2_106_379_000 picoseconds. 
+ Weight::from_parts(1_098_227_098, 12214) + // Standard Error: 12_549_729 + .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -3427,6 +3605,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3440,13 +3620,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `862 + r * (8 Β±0)` - // Estimated: `6801 + r * (8 Β±0)` - // Minimum execution time: 241_609_000 picoseconds. - Weight::from_parts(268_716_874, 6801) - // Standard Error: 617 - .saturating_add(Weight::from_parts(377_753, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `865 + r * (8 Β±0)` + // Estimated: `9279 + r * (8 Β±0)` + // Minimum execution time: 257_102_000 picoseconds. + Weight::from_parts(278_920_692, 9279) + // Standard Error: 464 + .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3454,6 +3634,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3467,19 +3649,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `870` - // Estimated: `6808` - // Minimum execution time: 261_296_000 picoseconds. - Weight::from_parts(255_531_654, 6808) + // Measured: `873` + // Estimated: `9286` + // Minimum execution time: 266_609_000 picoseconds. 
+ Weight::from_parts(263_034_132, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_081, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3493,13 +3677,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6806 + r * (8 Β±0)` - // Minimum execution time: 243_583_000 picoseconds. - Weight::from_parts(270_025_058, 6806) - // Standard Error: 560 - .saturating_add(Weight::from_parts(767_519, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9284 + r * (8 Β±0)` + // Minimum execution time: 246_275_000 picoseconds. + Weight::from_parts(279_091_594, 9284) + // Standard Error: 887 + .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3507,6 +3691,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3520,19 +3706,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6814` - // Minimum execution time: 253_798_000 picoseconds. - Weight::from_parts(265_542_351, 6814) + // Measured: `875` + // Estimated: `9292` + // Minimum execution time: 249_254_000 picoseconds. 
+ Weight::from_parts(270_833_823, 9292) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_343, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3546,13 +3734,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6808 + r * (8 Β±0)` - // Minimum execution time: 247_332_000 picoseconds. - Weight::from_parts(269_183_656, 6808) - // Standard Error: 665 - .saturating_add(Weight::from_parts(443_386, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9286 + r * (8 Β±0)` + // Minimum execution time: 259_131_000 picoseconds. + Weight::from_parts(272_665_020, 9286) + // Standard Error: 862 + .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3560,6 +3748,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3573,19 +3763,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6813` - // Minimum execution time: 250_855_000 picoseconds. - Weight::from_parts(258_752_975, 6813) + // Measured: `875` + // Estimated: `9291` + // Minimum execution time: 276_465_000 picoseconds. 
+ Weight::from_parts(269_450_297, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_202, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3599,13 +3791,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `864 + r * (8 Β±0)` - // Estimated: `6805 + r * (8 Β±0)` - // Minimum execution time: 240_733_000 picoseconds. - Weight::from_parts(269_134_358, 6805) - // Standard Error: 512 - .saturating_add(Weight::from_parts(440_043, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `867 + r * (8 Β±0)` + // Estimated: `9283 + r * (8 Β±0)` + // Minimum execution time: 248_349_000 picoseconds. + Weight::from_parts(273_660_686, 9283) + // Standard Error: 656 + .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) } @@ -3613,6 +3805,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3626,19 +3820,21 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `6811` - // Minimum execution time: 247_377_000 picoseconds. - Weight::from_parts(261_077_322, 6811) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `875` + // Estimated: `9289` + // Minimum execution time: 252_107_000 picoseconds. 
+ Weight::from_parts(267_427_726, 9289) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3652,13 +3848,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `997 + n * (1 Β±0)` - // Estimated: `6934 + n * (1 Β±0)` - // Minimum execution time: 307_337_000 picoseconds. - Weight::from_parts(326_710_473, 6934) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_765, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `1000 + n * (1 Β±0)` + // Estimated: `9412 + n * (1 Β±0)` + // Minimum execution time: 311_252_000 picoseconds. + Weight::from_parts(333_250_773, 9412) + // Standard Error: 12 + .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -3666,6 +3862,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3679,13 +3877,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + r * (112 Β±0)` - // Estimated: `6748 + r * (112 Β±0)` - // Minimum execution time: 245_432_000 picoseconds. - Weight::from_parts(294_206_377, 6748) - // Standard Error: 7_229 - .saturating_add(Weight::from_parts(41_480_485, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `810 + r * (112 Β±0)` + // Estimated: `9226 + r * (112 Β±0)` + // Minimum execution time: 267_431_000 picoseconds. 
+ Weight::from_parts(300_733_048, 9226) + // Standard Error: 8_806 + .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) } @@ -3693,6 +3891,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3706,13 +3906,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `907 + r * (76 Β±0)` - // Estimated: `6802 + r * (77 Β±0)` - // Minimum execution time: 247_788_000 picoseconds. - Weight::from_parts(303_940_062, 6802) - // Standard Error: 10_671 - .saturating_add(Weight::from_parts(45_730_772, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `910 + r * (76 Β±0)` + // Estimated: `9279 + r * (77 Β±0)` + // Minimum execution time: 265_905_000 picoseconds. + Weight::from_parts(305_381_951, 9279) + // Standard Error: 9_863 + .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) } @@ -3720,6 +3920,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3733,13 +3935,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `877 + r * (42 Β±0)` - // Estimated: `6816 + r * (42 Β±0)` - // Minimum execution time: 248_825_000 picoseconds. - Weight::from_parts(286_832_225, 6816) - // Standard Error: 5_274 - .saturating_add(Weight::from_parts(11_889_262, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `880 + r * (42 Β±0)` + // Estimated: `9294 + r * (42 Β±0)` + // Minimum execution time: 266_701_000 picoseconds. 
+ Weight::from_parts(292_407_888, 9294) + // Standard Error: 5_394 + .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) } @@ -3747,6 +3949,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) @@ -3761,12 +3965,12 @@ impl WeightInfo for () { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (965 Β±0)` - // Estimated: `6807 + r * (3090 Β±7)` - // Minimum execution time: 244_982_000 picoseconds. - Weight::from_parts(265_297_000, 6807) - // Standard Error: 39_895 - .saturating_add(Weight::from_parts(22_435_888, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Estimated: `9285 + r * (3090 Β±7)` + // Minimum execution time: 268_755_000 picoseconds. + Weight::from_parts(272_300_000, 9285) + // Standard Error: 43_747 + .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -3776,6 +3980,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3789,22 +3995,24 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 32]`. fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `928 + r * (131 Β±0)` - // Estimated: `6878 + r * (2606 Β±0)` - // Minimum execution time: 246_455_000 picoseconds. - Weight::from_parts(275_334_919, 6878) - // Standard Error: 20_911 - .saturating_add(Weight::from_parts(6_427_525, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `937 + r * (131 Β±0)` + // Estimated: `9346 + r * (2607 Β±0)` + // Minimum execution time: 255_190_000 picoseconds. 
+ Weight::from_parts(285_643_922, 9346) + // Standard Error: 23_582 + .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2606).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 2607).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `MaxEncodedLen`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:32) @@ -3818,13 +4026,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 32]`. fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `969 + r * (183 Β±0)` + // Measured: `972 + r * (184 Β±0)` // Estimated: `129453 + r * (2568 Β±0)` - // Minimum execution time: 254_472_000 picoseconds. - Weight::from_parts(280_657_909, 129453) - // Standard Error: 20_131 - .saturating_add(Weight::from_parts(5_644_006, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 260_061_000 picoseconds. + Weight::from_parts(286_849_343, 129453) + // Standard Error: 22_406 + .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) @@ -3834,6 +4042,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3847,13 +4057,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `858 + r * (3 Β±0)` - // Estimated: `6804 + r * (3 Β±0)` - // Minimum execution time: 250_535_000 picoseconds. 
- Weight::from_parts(270_318_376, 6804) - // Standard Error: 386 - .saturating_add(Weight::from_parts(174_627, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `861 + r * (3 Β±0)` + // Estimated: `9282 + r * (3 Β±0)` + // Minimum execution time: 250_922_000 picoseconds. + Weight::from_parts(277_294_913, 9282) + // Standard Error: 391 + .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3861,6 +4071,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3874,13 +4086,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_account_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2109 + r * (39 Β±0)` - // Estimated: `7899 + r * (40 Β±0)` - // Minimum execution time: 248_174_000 picoseconds. - Weight::from_parts(301_826_520, 7899) - // Standard Error: 801 - .saturating_add(Weight::from_parts(248_479, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Measured: `2112 + r * (39 Β±0)` + // Estimated: `10377 + r * (40 Β±0)` + // Minimum execution time: 267_293_000 picoseconds. + Weight::from_parts(309_125_635, 10377) + // Standard Error: 637 + .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) } @@ -3888,6 +4100,8 @@ impl WeightInfo for () { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:3 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -3903,13 +4117,13 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1600]`. fn seal_instantiation_nonce(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 Β±0)` - // Estimated: `6801 + r * (3 Β±0)` - // Minimum execution time: 246_540_000 picoseconds. 
- Weight::from_parts(268_913_509, 6801) - // Standard Error: 378 - .saturating_add(Weight::from_parts(154_950, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `864 + r * (3 Β±0)` + // Estimated: `9279 + r * (3 Β±0)` + // Minimum execution time: 256_057_000 picoseconds. + Weight::from_parts(278_068_870, 9279) + // Standard Error: 335 + .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -3918,9 +4132,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_777_000 picoseconds. - Weight::from_parts(1_707_601, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(15_392, 0).saturating_mul(r.into())) + // Minimum execution time: 1_846_000 picoseconds. + Weight::from_parts(1_808_029, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/conviction-voting/src/weights.rs b/substrate/frame/conviction-voting/src/weights.rs index 225f5c2cadd..75d9e8499ed 100644 --- a/substrate/frame/conviction-voting/src/weights.rs +++ b/substrate/frame/conviction-voting/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_conviction_voting +//! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/conviction-voting/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/conviction-voting/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_conviction_voting. +/// Weight functions needed for `pallet_conviction_voting`. pub trait WeightInfo { fn vote_new() -> Weight; fn vote_existing() -> Weight; @@ -61,280 +60,300 @@ pub trait WeightInfo { fn unlock() -> Weight; } -/// Weights for pallet_conviction_voting using the Substrate node and recommended hardware. 
+/// Weights for `pallet_conviction_voting` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13074` + // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 112_936_000 picoseconds. - Weight::from_parts(116_972_000, 219984) + // Minimum execution time: 102_539_000 picoseconds. 
+ Weight::from_parts(105_873_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20216` + // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 291_971_000 picoseconds. - Weight::from_parts(301_738_000, 219984) + // Minimum execution time: 275_424_000 picoseconds. 
+ Weight::from_parts(283_690_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `19968` + // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 262_582_000 picoseconds. - Weight::from_parts(270_955_000, 219984) + // Minimum execution time: 275_109_000 picoseconds. + Weight::from_parts(281_315_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12675` + // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 52_909_000 picoseconds. - Weight::from_parts(56_365_000, 30706) + // Minimum execution time: 49_629_000 picoseconds. 
+ Weight::from_parts(51_300_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `240 + r * (1627 Β±0)` + // Measured: `306 + r * (1628 Β±0)` // Estimated: `109992 + r * (109992 Β±0)` - // Minimum execution time: 54_640_000 picoseconds. - Weight::from_parts(57_185_281, 109992) - // Standard Error: 193_362 - .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) + // Minimum execution time: 45_776_000 picoseconds. 
+ Weight::from_parts(47_917_822, 109992) + // Standard Error: 124_174 + .saturating_add(Weight::from_parts(43_171_077, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `406 + r * (1376 Β±0)` + // Measured: `472 + r * (1377 Β±0)` // Estimated: `109992 + r * (109992 Β±0)` - // Minimum execution time: 26_514_000 picoseconds. - Weight::from_parts(28_083_732, 109992) - // Standard Error: 104_905 - .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) + // Minimum execution time: 23_600_000 picoseconds. 
+ Weight::from_parts(25_001_426, 109992) + // Standard Error: 72_034 + .saturating_add(Weight::from_parts(37_851_873, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11734` + // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 71_140_000 picoseconds. - Weight::from_parts(77_388_000, 30706) + // Minimum execution time: 66_247_000 picoseconds. + Weight::from_parts(67_552_000, 30706) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13074` + // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 112_936_000 picoseconds. - Weight::from_parts(116_972_000, 219984) + // Minimum execution time: 102_539_000 picoseconds. 
+ Weight::from_parts(105_873_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `20216` + // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 291_971_000 picoseconds. - Weight::from_parts(301_738_000, 219984) + // Minimum execution time: 275_424_000 picoseconds. 
+ Weight::from_parts(283_690_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `19968` + // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 262_582_000 picoseconds. - Weight::from_parts(270_955_000, 219984) + // Minimum execution time: 275_109_000 picoseconds. + Weight::from_parts(281_315_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `12675` + // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 52_909_000 picoseconds. - Weight::from_parts(56_365_000, 30706) + // Minimum execution time: 49_629_000 picoseconds. 
+ Weight::from_parts(51_300_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `240 + r * (1627 Β±0)` + // Measured: `306 + r * (1628 Β±0)` // Estimated: `109992 + r * (109992 Β±0)` - // Minimum execution time: 54_640_000 picoseconds. - Weight::from_parts(57_185_281, 109992) - // Standard Error: 193_362 - .saturating_add(Weight::from_parts(44_897_418, 0).saturating_mul(r.into())) + // Minimum execution time: 45_776_000 picoseconds. 
+ Weight::from_parts(47_917_822, 109992) + // Standard Error: 124_174 + .saturating_add(Weight::from_parts(43_171_077, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `406 + r * (1376 Β±0)` + // Measured: `472 + r * (1377 Β±0)` // Estimated: `109992 + r * (109992 Β±0)` - // Minimum execution time: 26_514_000 picoseconds. - Weight::from_parts(28_083_732, 109992) - // Standard Error: 104_905 - .saturating_add(Weight::from_parts(40_722_467, 0).saturating_mul(r.into())) + // Minimum execution time: 23_600_000 picoseconds. 
+ Weight::from_parts(25_001_426, 109992) + // Standard Error: 72_034 + .saturating_add(Weight::from_parts(37_851_873, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 109992).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(59), added: 2534, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(59), added: 2534, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `11734` + // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 71_140_000 picoseconds. - Weight::from_parts(77_388_000, 30706) + // Minimum execution time: 66_247_000 picoseconds. + Weight::from_parts(67_552_000, 30706) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 8bbfd1a4dd8..fce1d3747a8 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_core_fellowship +//! Autogenerated weights for `pallet_core_fellowship` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/core-fellowship/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/core-fellowship/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_core_fellowship. +/// Weight functions needed for `pallet_core_fellowship`. pub trait WeightInfo { fn set_params() -> Weight; fn bump_offboard() -> Weight; @@ -64,336 +63,344 @@ pub trait WeightInfo { fn submit_evidence() -> Weight; } -/// Weights for pallet_core_fellowship using the Substrate node and recommended hardware. +/// Weights for `pallet_core_fellowship` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: CoreFellowship Params (r:0 w:1) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Params` (r:0 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_454_000 picoseconds. - Weight::from_parts(9_804_000, 0) + // Minimum execution time: 7_146_000 picoseconds. 
+ Weight::from_parts(7_426_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16887` + // Measured: `17274` // Estimated: `19894` - // Minimum execution time: 58_489_000 picoseconds. - Weight::from_parts(60_202_000, 19894) + // Minimum execution time: 54_511_000 picoseconds. 
+ Weight::from_parts(56_995_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16997` + // Measured: `17384` // Estimated: `19894` - // Minimum execution time: 60_605_000 picoseconds. - Weight::from_parts(63_957_000, 19894) + // Minimum execution time: 56_453_000 picoseconds. 
+ Weight::from_parts(59_030_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 17_816_000 picoseconds. - Weight::from_parts(18_524_000, 3514) + // Minimum execution time: 15_940_000 picoseconds. + Weight::from_parts(16_381_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 27_249_000 picoseconds. - Weight::from_parts(28_049_000, 3514) + // Minimum execution time: 24_193_000 picoseconds. 
+ Weight::from_parts(24_963_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 56_642_000 picoseconds. - Weight::from_parts(59_353_000, 19894) + // Minimum execution time: 48_138_000 picoseconds. 
+ Weight::from_parts(50_007_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:0 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:0 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_459_000 picoseconds. - Weight::from_parts(18_033_000, 3514) + // Minimum execution time: 15_225_000 picoseconds. + Weight::from_parts(15_730_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn import() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_263_000, 3514) + // Minimum execution time: 14_507_000 picoseconds. 
+ Weight::from_parts(14_935_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 41_487_000 picoseconds. - Weight::from_parts(43_459_000, 19894) + // Minimum execution time: 34_050_000 picoseconds. + Weight::from_parts(36_323_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:0) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:0) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 26_033_000 picoseconds. - Weight::from_parts(26_612_000, 19894) + // Minimum execution time: 24_016_000 picoseconds. + Weight::from_parts(24_607_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: CoreFellowship Params (r:0 w:1) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Params` (r:0 w:1) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_454_000 picoseconds. - Weight::from_parts(9_804_000, 0) + // Minimum execution time: 7_146_000 picoseconds. 
+ Weight::from_parts(7_426_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `16887` + // Measured: `17274` // Estimated: `19894` - // Minimum execution time: 58_489_000 picoseconds. - Weight::from_parts(60_202_000, 19894) + // Minimum execution time: 54_511_000 picoseconds. 
+ Weight::from_parts(56_995_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:0) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `16997` + // Measured: `17384` // Estimated: `19894` - // Minimum execution time: 60_605_000 picoseconds. - Weight::from_parts(63_957_000, 19894) + // Minimum execution time: 56_453_000 picoseconds. 
+ Weight::from_parts(59_030_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn set_active() -> Weight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 17_816_000 picoseconds. - Weight::from_parts(18_524_000, 3514) + // Minimum execution time: 15_940_000 picoseconds. + Weight::from_parts(16_381_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 27_249_000 picoseconds. - Weight::from_parts(28_049_000, 3514) + // Minimum execution time: 24_193_000 picoseconds. 
+ Weight::from_parts(24_963_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship Params (r:1 w:0) - /// Proof: CoreFellowship Params (max_values: Some(1), max_size: Some(364), added: 859, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Params` (r:1 w:0) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: // Measured: `16865` // Estimated: `19894` - // Minimum execution time: 56_642_000 picoseconds. - Weight::from_parts(59_353_000, 19894) + // Minimum execution time: 48_138_000 picoseconds. 
+ Weight::from_parts(50_007_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:0 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:0 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `359` + // Measured: `293` // Estimated: `3514` - // Minimum execution time: 17_459_000 picoseconds. - Weight::from_parts(18_033_000, 3514) + // Minimum execution time: 15_225_000 picoseconds. + Weight::from_parts(15_730_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn import() -> Weight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_263_000, 3514) + // Minimum execution time: 14_507_000 picoseconds. 
+ Weight::from_parts(14_935_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: CoreFellowship Member (r:1 w:1) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:1 w:1) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn approve() -> Weight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 41_487_000 picoseconds. - Weight::from_parts(43_459_000, 19894) + // Minimum execution time: 34_050_000 picoseconds. + Weight::from_parts(36_323_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: CoreFellowship Member (r:1 w:0) - /// Proof: CoreFellowship Member (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: CoreFellowship MemberEvidence (r:1 w:1) - /// Proof: CoreFellowship MemberEvidence (max_values: None, max_size: Some(16429), added: 18904, mode: MaxEncodedLen) + /// Storage: `CoreFellowship::Member` (r:1 w:0) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) fn submit_evidence() -> Weight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 26_033_000 picoseconds. - Weight::from_parts(26_612_000, 19894) + // Minimum execution time: 24_016_000 picoseconds. + Weight::from_parts(24_607_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs index 241f6c3cb38..d6097922a82 100644 --- a/substrate/frame/democracy/src/weights.rs +++ b/substrate/frame/democracy/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_democracy +//! Autogenerated weights for `pallet_democracy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/democracy/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/democracy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_democracy. +/// Weight functions needed for `pallet_democracy`. pub trait WeightInfo { fn propose() -> Weight; fn second() -> Weight; @@ -82,904 +81,916 @@ pub trait WeightInfo { fn clear_referendum_metadata() -> Weight; } -/// Weights for pallet_democracy using the Substrate node and recommended hardware. +/// Weights for `pallet_democracy` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Democracy PublicPropCount (r:1 w:1) - /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:0 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicPropCount` (r:1 w:1) + /// Proof: `Democracy::PublicPropCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:0 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4801` + // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_339_000 picoseconds. - Weight::from_parts(50_942_000, 18187) + // Minimum execution time: 39_930_000 picoseconds. 
+ Weight::from_parts(41_746_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3556` + // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_291_000 picoseconds. - Weight::from_parts(44_856_000, 6695) + // Minimum execution time: 36_490_000 picoseconds. + Weight::from_parts(37_615_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3470` + // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 61_890_000 picoseconds. - Weight::from_parts(63_626_000, 7260) + // Minimum execution time: 54_257_000 picoseconds. 
+ Weight::from_parts(55_912_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3492` + // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 67_802_000 picoseconds. - Weight::from_parts(69_132_000, 7260) + // Minimum execution time: 56_878_000 picoseconds. + Weight::from_parts(58_796_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Cancellations` (r:1 w:1) + /// Proof: `Democracy::Cancellations` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `366` + // Measured: `399` // Estimated: `3666` - // Minimum execution time: 25_757_000 picoseconds. - Weight::from_parts(27_226_000, 3666) + // Minimum execution time: 22_700_000 picoseconds. 
+ Weight::from_parts(23_539_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:3 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:0 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `5910` + // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 113_060_000 picoseconds. - Weight::from_parts(114_813_000, 18187) + // Minimum execution time: 95_398_000 picoseconds. 
+ Weight::from_parts(97_261_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3416` + // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 13_413_000 picoseconds. - Weight::from_parts(13_794_000, 6703) + // Minimum execution time: 11_745_000 picoseconds. + Weight::from_parts(12_304_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_majority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_213_000 picoseconds. - Weight::from_parts(3_429_000, 0) + // Minimum execution time: 2_710_000 picoseconds. + Weight::from_parts(2_918_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_default() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_389_000, 0) + // Minimum execution time: 2_664_000 picoseconds. 
+ Weight::from_parts(2_776_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:1) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:2) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn fast_track() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 28_142_000 picoseconds. - Weight::from_parts(28_862_000, 3518) + // Minimum execution time: 22_585_000 picoseconds. + Weight::from_parts(23_689_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3519` + // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 32_395_000 picoseconds. - Weight::from_parts(33_617_000, 6703) + // Minimum execution time: 26_391_000 picoseconds. 
+ Weight::from_parts(27_141_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5821` + // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 92_255_000 picoseconds. - Weight::from_parts(93_704_000, 18187) + // Minimum execution time: 77_905_000 picoseconds. + Weight::from_parts(79_628_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn cancel_referendum() -> Weight { // Proof Size summary in bytes: - // Measured: `271` + // Measured: `304` // Estimated: `3518` - // Minimum execution time: 19_623_000 picoseconds. - Weight::from_parts(20_545_000, 3518) + // Minimum execution time: 15_735_000 picoseconds. 
+ Weight::from_parts(16_525_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 Β±0)` + // Measured: `277 + r * (86 Β±0)` // Estimated: `1489 + r * (2676 Β±0)` - // Minimum execution time: 7_032_000 picoseconds. - Weight::from_parts(7_931_421, 1489) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) + // Minimum execution time: 5_274_000 picoseconds. + Weight::from_parts(6_162_399, 1489) + // Standard Error: 6_924 + .saturating_add(Weight::from_parts(3_186_702, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::LastTabledWasExternal` (r:1 w:0) + /// Proof: `Democracy::LastTabledWasExternal` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// 
Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 Β±0)` + // Measured: `277 + r * (86 Β±0)` // Estimated: `18187 + r * (2676 Β±0)` - // Minimum execution time: 10_524_000 picoseconds. - Weight::from_parts(10_369_064, 18187) - // Standard Error: 8_385 - .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) + // Minimum execution time: 7_950_000 picoseconds. + Weight::from_parts(7_381_228, 18187) + // Standard Error: 6_650 + .saturating_add(Weight::from_parts(3_198_515, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:3 w:3) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `830 + r * (108 Β±0)` + // Measured: `863 + r * (108 Β±0)` // Estimated: `19800 + r * (2676 Β±0)` - // Minimum execution time: 46_106_000 picoseconds. - Weight::from_parts(48_936_654, 19800) - // Standard Error: 8_879 - .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) + // Minimum execution time: 40_109_000 picoseconds. 
+ Weight::from_parts(43_164_384, 19800) + // Standard Error: 7_267 + .saturating_add(Weight::from_parts(4_161_526, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:2 w:2) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `493 + r * (108 Β±0)` + // Measured: `526 + r * (108 Β±0)` // Estimated: `13530 + r * (2676 Β±0)` - // Minimum execution time: 21_078_000 picoseconds. - Weight::from_parts(22_732_737, 13530) - // Standard Error: 7_969 - .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) + // Minimum execution time: 17_466_000 picoseconds. + Weight::from_parts(18_004_456, 13530) + // Standard Error: 6_327 + .saturating_add(Weight::from_parts(4_194_583, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:0 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) fn clear_public_proposals() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_229_000 picoseconds. - Weight::from_parts(3_415_000, 0) + // Minimum execution time: 2_824_000 picoseconds. 
+ Weight::from_parts(2_948_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `596` // Estimated: `7260` - // Minimum execution time: 25_735_000 picoseconds. - Weight::from_parts(41_341_468, 7260) - // Standard Error: 3_727 - .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) + // Minimum execution time: 23_373_000 picoseconds. + Weight::from_parts(34_306_582, 7260) + // Standard Error: 2_849 + .saturating_add(Weight::from_parts(85_027, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + r * (22 Β±0)` + // Measured: `597 + r * (22 Β±0)` // Estimated: `7260` - // Minimum execution time: 36_233_000 picoseconds. 
- Weight::from_parts(39_836_017, 7260) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) + // Minimum execution time: 31_574_000 picoseconds. + Weight::from_parts(33_906_658, 7260) + // Standard Error: 1_514 + .saturating_add(Weight::from_parts(124_471, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 Β±0)` + // Measured: `761 + r * (26 Β±0)` // Estimated: `7260` - // Minimum execution time: 16_081_000 picoseconds. - Weight::from_parts(19_624_101, 7260) - // Standard Error: 1_639 - .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) + // Minimum execution time: 15_204_000 picoseconds. + Weight::from_parts(18_405_879, 7260) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(119_018, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 Β±0)` + // Measured: `761 + r * (26 Β±0)` // Estimated: `7260` - // Minimum execution time: 15_634_000 picoseconds. - Weight::from_parts(19_573_407, 7260) - // Standard Error: 1_790 - .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) + // Minimum execution time: 15_120_000 picoseconds. 
+ Weight::from_parts(18_282_222, 7260) + // Standard Error: 1_669 + .saturating_add(Weight::from_parts(127_649, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `356` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 18_344_000 picoseconds. - Weight::from_parts(18_727_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 17_351_000 picoseconds. + Weight::from_parts(17_964_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_497_000 picoseconds. - Weight::from_parts(16_892_000, 3518) + // Minimum execution time: 13_669_000 picoseconds. 
+ Weight::from_parts(14_410_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4888` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 39_517_000 picoseconds. - Weight::from_parts(40_632_000, 18187) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 39_162_000 picoseconds. + Weight::from_parts(40_109_000, 18187) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4822` + // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 37_108_000 picoseconds. - Weight::from_parts(37_599_000, 18187) + // Minimum execution time: 34_141_000 picoseconds. 
+ Weight::from_parts(34_732_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 13_997_000 picoseconds. - Weight::from_parts(14_298_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(14_039_000, 3556) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `302` + // Measured: `335` // Estimated: `3666` - // Minimum execution time: 18_122_000 picoseconds. - Weight::from_parts(18_655_000, 3666) + // Minimum execution time: 16_010_000 picoseconds. + Weight::from_parts(16_474_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Democracy PublicPropCount (r:1 w:1) - /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:0 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicPropCount` (r:1 w:1) + /// Proof: `Democracy::PublicPropCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:0 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn propose() -> Weight { // Proof Size summary in bytes: - // Measured: `4801` + // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 49_339_000 picoseconds. - Weight::from_parts(50_942_000, 18187) + // Minimum execution time: 39_930_000 picoseconds. + Weight::from_parts(41_746_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) fn second() -> Weight { // Proof Size summary in bytes: - // Measured: `3556` + // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 43_291_000 picoseconds. - Weight::from_parts(44_856_000, 6695) + // Minimum execution time: 36_490_000 picoseconds. 
+ Weight::from_parts(37_615_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `3470` + // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 61_890_000 picoseconds. - Weight::from_parts(63_626_000, 7260) + // Minimum execution time: 54_257_000 picoseconds. + Weight::from_parts(55_912_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `3492` + // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 67_802_000 picoseconds. - Weight::from_parts(69_132_000, 7260) + // Minimum execution time: 56_878_000 picoseconds. 
+ Weight::from_parts(58_796_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Cancellations` (r:1 w:1) + /// Proof: `Democracy::Cancellations` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn emergency_cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `366` + // Measured: `399` // Estimated: `3666` - // Minimum execution time: 25_757_000 picoseconds. - Weight::from_parts(27_226_000, 3666) + // Minimum execution time: 22_700_000 picoseconds. + Weight::from_parts(23_539_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:3 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: 
`Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:0 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn blacklist() -> Weight { // Proof Size summary in bytes: - // Measured: `5910` + // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 113_060_000 picoseconds. - Weight::from_parts(114_813_000, 18187) + // Minimum execution time: 95_398_000 picoseconds. + Weight::from_parts(97_261_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:0) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) fn external_propose() -> Weight { // Proof Size summary in bytes: - // Measured: `3416` + // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 13_413_000 picoseconds. - Weight::from_parts(13_794_000, 6703) + // Minimum execution time: 11_745_000 picoseconds. + Weight::from_parts(12_304_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_majority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_213_000 picoseconds. - Weight::from_parts(3_429_000, 0) + // Minimum execution time: 2_710_000 picoseconds. + Weight::from_parts(2_918_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:0 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) fn external_propose_default() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_280_000 picoseconds. - Weight::from_parts(3_389_000, 0) + // Minimum execution time: 2_664_000 picoseconds. 
+ Weight::from_parts(2_776_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:1) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:2) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn fast_track() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 28_142_000 picoseconds. - Weight::from_parts(28_862_000, 3518) + // Minimum execution time: 22_585_000 picoseconds. + Weight::from_parts(23_689_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:1) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::Blacklist` (r:1 w:1) + /// Proof: `Democracy::Blacklist` (`max_values`: None, `max_size`: Some(3238), added: 5713, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn veto_external() -> Weight { // Proof Size summary in bytes: - // Measured: `3519` + // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 32_395_000 picoseconds. - Weight::from_parts(33_617_000, 6703) + // Minimum execution time: 26_391_000 picoseconds. 
+ Weight::from_parts(27_141_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::DepositOf` (r:1 w:1) + /// Proof: `Democracy::DepositOf` (`max_values`: None, `max_size`: Some(3230), added: 5705, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn cancel_proposal() -> Weight { // Proof Size summary in bytes: - // Measured: `5821` + // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 92_255_000 picoseconds. - Weight::from_parts(93_704_000, 18187) + // Minimum execution time: 77_905_000 picoseconds. + Weight::from_parts(79_628_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:0 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) fn cancel_referendum() -> Weight { // Proof Size summary in bytes: - // Measured: `271` + // Measured: `304` // Estimated: `3518` - // Minimum execution time: 19_623_000 picoseconds. - Weight::from_parts(20_545_000, 3518) + // Minimum execution time: 15_735_000 picoseconds. 
+ Weight::from_parts(16_525_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 Β±0)` + // Measured: `277 + r * (86 Β±0)` // Estimated: `1489 + r * (2676 Β±0)` - // Minimum execution time: 7_032_000 picoseconds. - Weight::from_parts(7_931_421, 1489) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_236_964, 0).saturating_mul(r.into())) + // Minimum execution time: 5_274_000 picoseconds. + Weight::from_parts(6_162_399, 1489) + // Standard Error: 6_924 + .saturating_add(Weight::from_parts(3_186_702, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::LowestUnbaked` (r:1 w:1) + /// Proof: `Democracy::LowestUnbaked` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumCount` (r:1 w:0) + /// Proof: `Democracy::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Democracy::LastTabledWasExternal` (r:1 w:0) + /// Proof: `Democracy::LastTabledWasExternal` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: 
`MaxEncodedLen`) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `244 + r * (86 Β±0)` + // Measured: `277 + r * (86 Β±0)` // Estimated: `18187 + r * (2676 Β±0)` - // Minimum execution time: 10_524_000 picoseconds. - Weight::from_parts(10_369_064, 18187) - // Standard Error: 8_385 - .saturating_add(Weight::from_parts(3_242_334, 0).saturating_mul(r.into())) + // Minimum execution time: 7_950_000 picoseconds. + Weight::from_parts(7_381_228, 18187) + // Standard Error: 6_650 + .saturating_add(Weight::from_parts(3_198_515, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:3 w:3) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `830 + r * (108 Β±0)` + // Measured: `863 + r * (108 Β±0)` // Estimated: `19800 + r * (2676 Β±0)` - // Minimum execution time: 46_106_000 picoseconds. - Weight::from_parts(48_936_654, 19800) - // Standard Error: 8_879 - .saturating_add(Weight::from_parts(4_708_141, 0).saturating_mul(r.into())) + // Minimum execution time: 40_109_000 picoseconds. 
+ Weight::from_parts(43_164_384, 19800) + // Standard Error: 7_267 + .saturating_add(Weight::from_parts(4_161_526, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:2 w:2) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Democracy::ReferendumInfoOf` (r:99 w:99) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `493 + r * (108 Β±0)` + // Measured: `526 + r * (108 Β±0)` // Estimated: `13530 + r * (2676 Β±0)` - // Minimum execution time: 21_078_000 picoseconds. - Weight::from_parts(22_732_737, 13530) - // Standard Error: 7_969 - .saturating_add(Weight::from_parts(4_626_458, 0).saturating_mul(r.into())) + // Minimum execution time: 17_466_000 picoseconds. + Weight::from_parts(18_004_456, 13530) + // Standard Error: 6_327 + .saturating_add(Weight::from_parts(4_194_583, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:0 w:1) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) fn clear_public_proposals() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_229_000 picoseconds. - Weight::from_parts(3_415_000, 0) + // Minimum execution time: 2_824_000 picoseconds. 
+ Weight::from_parts(2_948_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` + // Measured: `596` // Estimated: `7260` - // Minimum execution time: 25_735_000 picoseconds. - Weight::from_parts(41_341_468, 7260) - // Standard Error: 3_727 - .saturating_add(Weight::from_parts(94_755, 0).saturating_mul(r.into())) + // Minimum execution time: 23_373_000 picoseconds. + Weight::from_parts(34_306_582, 7260) + // Standard Error: 2_849 + .saturating_add(Weight::from_parts(85_027, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `564 + r * (22 Β±0)` + // Measured: `597 + r * (22 Β±0)` // Estimated: `7260` - // Minimum execution time: 36_233_000 picoseconds. 
- Weight::from_parts(39_836_017, 7260) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(132_158, 0).saturating_mul(r.into())) + // Minimum execution time: 31_574_000 picoseconds. + Weight::from_parts(33_906_658, 7260) + // Standard Error: 1_514 + .saturating_add(Weight::from_parts(124_471, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 Β±0)` + // Measured: `761 + r * (26 Β±0)` // Estimated: `7260` - // Minimum execution time: 16_081_000 picoseconds. - Weight::from_parts(19_624_101, 7260) - // Standard Error: 1_639 - .saturating_add(Weight::from_parts(133_630, 0).saturating_mul(r.into())) + // Minimum execution time: 15_204_000 picoseconds. + Weight::from_parts(18_405_879, 7260) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(119_018, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:1) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::VotingOf` (r:1 w:1) + /// Proof: `Democracy::VotingOf` (`max_values`: None, `max_size`: Some(3795), added: 6270, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + r * (26 Β±0)` + // Measured: `761 + r * (26 Β±0)` // Estimated: `7260` - // Minimum execution time: 15_634_000 picoseconds. - Weight::from_parts(19_573_407, 7260) - // Standard Error: 1_790 - .saturating_add(Weight::from_parts(139_707, 0).saturating_mul(r.into())) + // Minimum execution time: 15_120_000 picoseconds. 
+ Weight::from_parts(18_282_222, 7260) + // Standard Error: 1_669 + .saturating_add(Weight::from_parts(127_649, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `356` + // Measured: `456` // Estimated: `3556` - // Minimum execution time: 18_344_000 picoseconds. - Weight::from_parts(18_727_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 17_351_000 picoseconds. + Weight::from_parts(17_964_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::NextExternal` (r:1 w:0) + /// Proof: `Democracy::NextExternal` (`max_values`: Some(1), `max_size`: Some(132), added: 627, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `286` + // Measured: `319` // Estimated: `3518` - // Minimum execution time: 16_497_000 picoseconds. - Weight::from_parts(16_892_000, 3518) + // Minimum execution time: 13_669_000 picoseconds. 
+ Weight::from_parts(14_410_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4888` + // Measured: `4988` // Estimated: `18187` - // Minimum execution time: 39_517_000 picoseconds. - Weight::from_parts(40_632_000, 18187) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 39_162_000 picoseconds. + Weight::from_parts(40_109_000, 18187) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::PublicProps` (r:1 w:0) + /// Proof: `Democracy::PublicProps` (`max_values`: Some(1), `max_size`: Some(16702), added: 17197, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4822` + // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 37_108_000 picoseconds. - Weight::from_parts(37_599_000, 18187) + // Minimum execution time: 34_141_000 picoseconds. 
+ Weight::from_parts(34_732_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:0 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 13_997_000 picoseconds. - Weight::from_parts(14_298_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 13_413_000 picoseconds. + Weight::from_parts(14_039_000, 3556) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Democracy ReferendumInfoOf (r:1 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) + /// Storage: `Democracy::ReferendumInfoOf` (r:1 w:0) + /// Proof: `Democracy::ReferendumInfoOf` (`max_values`: None, `max_size`: Some(201), added: 2676, mode: `MaxEncodedLen`) + /// Storage: `Democracy::MetadataOf` (r:1 w:1) + /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn clear_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `302` + // Measured: `335` // Estimated: `3666` - // Minimum execution time: 18_122_000 picoseconds. - Weight::from_parts(18_655_000, 3666) + // Minimum execution time: 16_010_000 picoseconds. + Weight::from_parts(16_474_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 18dcd7061c1..312dc557090 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -441,7 +441,7 @@ where type Extrinsic = Extrinsic; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; parameter_types! 
{ pub MaxNominations: u32 = ::LIMIT as u32; diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 94348181334..94cfd059b6c 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -1813,7 +1813,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); - let call = extrinsic.call; + let call = extrinsic.function; assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1830,7 +1830,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); - let call = match extrinsic.call { + let call = match extrinsic.function { RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/substrate/frame/election-provider-multi-phase/src/weights.rs b/substrate/frame/election-provider-multi-phase/src/weights.rs index be578fac8c4..ed3e942716e 100644 --- a/substrate/frame/election-provider-multi-phase/src/weights.rs +++ b/substrate/frame/election-provider-multi-phase/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_election_provider_multi_phase +//! Autogenerated weights for `pallet_election_provider_multi_phase` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/election-provider-multi-phase/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/election-provider-multi-phase/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_election_provider_multi_phase. +/// Weight functions needed for `pallet_election_provider_multi_phase`. 
pub trait WeightInfo {
fn on_initialize_nothing() -> Weight;
fn on_initialize_open_signed() -> Weight;
@@ -64,169 +63,171 @@ pub trait WeightInfo {
fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight;
}
-/// Weights for pallet_election_provider_multi_phase using the Substrate node and recommended hardware.
+/// Weights for `pallet_election_provider_multi_phase` using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
- /// Storage: Staking CurrentEra (r:1 w:0)
- /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
- /// Storage: Staking CurrentPlannedSession (r:1 w:0)
- /// Proof: Staking CurrentPlannedSession (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
- /// Storage: Staking ErasStartSessionIndex (r:1 w:0)
- /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen)
- /// Storage: Babe EpochIndex (r:1 w:0)
- /// Proof: Babe EpochIndex (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
- /// Storage: Babe GenesisSlot (r:1 w:0)
- /// Proof: Babe GenesisSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
- /// Storage: Babe CurrentSlot (r:1 w:0)
- /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
- /// Storage: Staking ForceEra (r:1 w:0)
- /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen)
- /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0)
- /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured)
+ /// Storage: `Staking::CurrentEra` (r:1 w:0)
+ /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0)
+ /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0)
+ /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`)
+ /// Storage: `Babe::EpochIndex` (r:1 w:0)
+ /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+ /// Storage: `Babe::GenesisSlot` (r:1 w:0)
+ /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+ /// Storage: `Babe::CurrentSlot` (r:1 w:0)
+ /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::ForceEra` (r:1 w:0)
+ /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`)
+ /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0)
+ /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
fn on_initialize_nothing() -> Weight {
// Proof Size summary in bytes:
- // Measured: `1028`
+ // Measured: `1061`
// Estimated: `3481`
- // Minimum execution time: 22_089_000 picoseconds.
- Weight::from_parts(22_677_000, 3481)
+ // Minimum execution time: 19_340_000 picoseconds.
+ Weight::from_parts(19_886_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 11_986_000 picoseconds. - Weight::from_parts(12_445_000, 1633) + // Minimum execution time: 8_067_000 picoseconds. + Weight::from_parts(8_508_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 12_988_000 picoseconds. - Weight::from_parts(13_281_000, 1633) + // Minimum execution time: 8_810_000 picoseconds. + Weight::from_parts(9_061_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_659_000 picoseconds. - Weight::from_parts(33_281_000, 3593) + // Minimum execution time: 24_339_000 picoseconds. 
+ Weight::from_parts(25_322_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_471_000 picoseconds. - Weight::from_parts(23_046_000, 3593) + // Minimum execution time: 16_635_000 picoseconds. + Weight::from_parts(17_497_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 262_360_000 picoseconds. - Weight::from_parts(279_313_000, 0) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) + // Minimum execution time: 170_730_000 picoseconds. 
+ Weight::from_parts(175_009_000, 0) + // Standard Error: 2_010 + .saturating_add(Weight::from_parts(224_974, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + a * (768 Β±0) + d * (48 Β±0)` // Estimated: `3923 + a * (768 Β±0) + d * (49 Β±0)` - // Minimum execution time: 301_283_000 picoseconds. - Weight::from_parts(324_586_000, 3923) - // Standard Error: 4_763 - .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) + // Minimum execution time: 280_705_000 picoseconds. + Weight::from_parts(303_018_000, 3923) + // Standard Error: 4_633 + .saturating_add(Weight::from_parts(307_274, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `2412` - // Minimum 
execution time: 52_276_000 picoseconds. - Weight::from_parts(53_846_000, 2412) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 43_405_000 picoseconds. + Weight::from_parts(45_734_000, 2412) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -235,25 +236,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `253 + t * (32 Β±0) + v * (553 Β±0)` // Estimated: `1738 + t * (32 Β±0) + v * (553 Β±0)` - // Minimum execution time: 5_448_459_000 picoseconds. 
- Weight::from_parts(5_525_622_000, 1738) - // Standard Error: 21_478 - .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) - // Standard Error: 63_648 - .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) + // Minimum execution time: 5_059_092_000 picoseconds. + Weight::from_parts(5_263_076_000, 1738) + // Standard Error: 17_317 + .saturating_add(Weight::from_parts(384_051, 0).saturating_mul(v.into())) + // Standard Error: 51_319 + .saturating_add(Weight::from_parts(4_095_128, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -262,180 +263,182 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `228 + t * (32 Β±0) + v * (553 Β±0)` // Estimated: `1713 + t * (32 Β±0) + v * (553 Β±0)` - // Minimum execution time: 4_724_399_000 picoseconds. - Weight::from_parts(4_886_472_000, 1713) - // Standard Error: 15_220 - .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) - // Standard Error: 45_104 - .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) + // Minimum execution time: 4_426_416_000 picoseconds. + Weight::from_parts(4_466_923_000, 1713) + // Standard Error: 15_415 + .saturating_add(Weight::from_parts(334_047, 0).saturating_mul(v.into())) + // Standard Error: 45_682 + .saturating_add(Weight::from_parts(3_097_318, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentPlannedSession (r:1 w:0) - /// Proof: Staking CurrentPlannedSession (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStartSessionIndex (r:1 w:0) - /// Proof: Staking ErasStartSessionIndex (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: Babe EpochIndex (r:1 w:0) - /// Proof: Babe EpochIndex (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe GenesisSlot (r:1 w:0) - /// Proof: Babe GenesisSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Staking ForceEra (r:1 w:0) - /// Proof: Staking ForceEra (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) + /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochIndex` (r:1 w:0) + /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::GenesisSlot` (r:1 w:0) + /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1028` + // Measured: `1061` // Estimated: `3481` - // Minimum execution time: 22_089_000 picoseconds. - Weight::from_parts(22_677_000, 3481) + // Minimum execution time: 19_340_000 picoseconds. 
+ Weight::from_parts(19_886_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_signed() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 11_986_000 picoseconds. - Weight::from_parts(12_445_000, 1633) + // Minimum execution time: 8_067_000 picoseconds. + Weight::from_parts(8_508_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_open_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 12_988_000 picoseconds. - Weight::from_parts(13_281_000, 1633) + // Minimum execution time: 8_810_000 picoseconds. + Weight::from_parts(9_061_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn finalize_signed_phase_accept_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 32_659_000 picoseconds. - Weight::from_parts(33_281_000, 3593) + // Minimum execution time: 24_339_000 picoseconds. 
+ Weight::from_parts(25_322_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn finalize_signed_phase_reject_solution() -> Weight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 22_471_000 picoseconds. - Weight::from_parts(23_046_000, 3593) + // Minimum execution time: 16_635_000 picoseconds. + Weight::from_parts(17_497_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 262_360_000 picoseconds. - Weight::from_parts(279_313_000, 0) - // Standard Error: 2_384 - .saturating_add(Weight::from_parts(176_415, 0).saturating_mul(v.into())) + // Minimum execution time: 170_730_000 picoseconds. 
+ Weight::from_parts(175_009_000, 0) + // Standard Error: 2_010 + .saturating_add(Weight::from_parts(224_974, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) /// The range of component `a` is `[500, 800]`. /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + a * (768 Β±0) + d * (48 Β±0)` // Estimated: `3923 + a * (768 Β±0) + d * (49 Β±0)` - // Minimum execution time: 301_283_000 picoseconds. - Weight::from_parts(324_586_000, 3923) - // Standard Error: 4_763 - .saturating_add(Weight::from_parts(279_812, 0).saturating_mul(a.into())) + // Minimum execution time: 280_705_000 picoseconds. + Weight::from_parts(303_018_000, 3923) + // Standard Error: 4_633 + .saturating_add(Weight::from_parts(307_274, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) .saturating_add(Weight::from_parts(0, 49).saturating_mul(d.into())) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) - /// Proof: TransactionPayment NextFeeMultiplier (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionIndices (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionNextIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) - /// Proof Skipped: ElectionProviderMultiPhase SignedSubmissionsMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) + /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `2412` - // 
Minimum execution time: 52_276_000 picoseconds. - Weight::from_parts(53_846_000, 2412) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 43_405_000 picoseconds. + Weight::from_parts(45_734_000, 2412) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) - /// Proof Skipped: ElectionProviderMultiPhase QueuedSolution (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase SnapshotMetadata (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::QueuedSolution` (r:1 w:1) + /// Proof: `ElectionProviderMultiPhase::QueuedSolution` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::SnapshotMetadata` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::SnapshotMetadata` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -444,25 +447,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `253 + t * (32 Β±0) + v * (553 Β±0)` // Estimated: `1738 + t * (32 Β±0) + v * (553 Β±0)` - // Minimum execution time: 5_448_459_000 picoseconds. 
- Weight::from_parts(5_525_622_000, 1738) - // Standard Error: 21_478 - .saturating_add(Weight::from_parts(256_345, 0).saturating_mul(v.into())) - // Standard Error: 63_648 - .saturating_add(Weight::from_parts(5_103_224, 0).saturating_mul(a.into())) + // Minimum execution time: 5_059_092_000 picoseconds. + Weight::from_parts(5_263_076_000, 1738) + // Standard Error: 17_317 + .saturating_add(Weight::from_parts(384_051, 0).saturating_mul(v.into())) + // Standard Error: 51_319 + .saturating_add(Weight::from_parts(4_095_128, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) } - /// Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase DesiredTargets (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Snapshot (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase Round (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase Round (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase MinimumUntrustedScore (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `ElectionProviderMultiPhase::DesiredTargets` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::DesiredTargets` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Snapshot` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Snapshot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::Round` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ElectionProviderMultiPhase::MinimumUntrustedScore` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::MinimumUntrustedScore` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1000, 2000]`. /// The range of component `t` is `[500, 1000]`. /// The range of component `a` is `[500, 800]`. @@ -471,12 +474,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `228 + t * (32 Β±0) + v * (553 Β±0)` // Estimated: `1713 + t * (32 Β±0) + v * (553 Β±0)` - // Minimum execution time: 4_724_399_000 picoseconds. - Weight::from_parts(4_886_472_000, 1713) - // Standard Error: 15_220 - .saturating_add(Weight::from_parts(365_569, 0).saturating_mul(v.into())) - // Standard Error: 45_104 - .saturating_add(Weight::from_parts(3_176_675, 0).saturating_mul(a.into())) + // Minimum execution time: 4_426_416_000 picoseconds. 
+ Weight::from_parts(4_466_923_000, 1713) + // Standard Error: 15_415 + .saturating_add(Weight::from_parts(334_047, 0).saturating_mul(v.into())) + // Standard Error: 45_682 + .saturating_add(Weight::from_parts(3_097_318, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 882b894bb22..7fade251e6d 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -62,7 +62,7 @@ pub const INIT_TIMESTAMP: BlockNumber = 30_000; pub const BLOCK_TIME: BlockNumber = 1000; type Block = frame_system::mocking::MockBlockU32; -type Extrinsic = testing::TestXt; +type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime { @@ -699,7 +699,7 @@ pub fn roll_to_with_ocw(n: BlockNumber, pool: Arc>, delay_solu for encoded in &pool.read().transactions { let extrinsic = Extrinsic::decode(&mut &encoded[..]).unwrap(); - let _ = match extrinsic.call { + let _ = match extrinsic.function { RuntimeCall::ElectionProviderMultiPhase( call @ Call::submit_unsigned { .. }, ) => { diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index 308f2cdc1aa..e08412a6e87 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -1422,7 +1422,7 @@ mod tests { pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test diff --git a/substrate/frame/elections-phragmen/src/weights.rs b/substrate/frame/elections-phragmen/src/weights.rs index b7ed13dae9f..cd67918e85b 100644 --- a/substrate/frame/elections-phragmen/src/weights.rs +++ b/substrate/frame/elections-phragmen/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_elections_phragmen +//! Autogenerated weights for `pallet_elections_phragmen` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 
 // Executed Command:
-// ./target/production/substrate
+// ./target/production/substrate-node
 // benchmark
 // pallet
 // --chain=dev
@@ -35,12 +35,11 @@
 // --no-median-slopes
 // --no-min-squares
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./frame/elections-phragmen/src/weights.rs
-// --header=./HEADER-APACHE2
-// --template=./.maintain/frame-weight-template.hbs
+// --output=./substrate/frame/elections-phragmen/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
+// --template=./substrate/.maintain/frame-weight-template.hbs
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,7 +49,7 @@
 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
 
-/// Weight functions needed for pallet_elections_phragmen.
+/// Weight functions needed for `pallet_elections_phragmen`.
 pub trait WeightInfo {
 fn vote_equal(v: u32, ) -> Weight;
 fn vote_more(v: u32, ) -> Weight;
@@ -66,165 +65,165 @@ pub trait WeightInfo {
 fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight;
 }
 
-/// Weights for pallet_elections_phragmen using the Substrate node and recommended hardware.
+/// Weights for `pallet_elections_phragmen` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
- /// Storage: Elections Candidates (r:1 w:0)
- /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Elections Members (r:1 w:0)
- /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Elections RunnersUp (r:1 w:0)
- /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Elections Voting (r:1 w:1)
- /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured)
- /// Storage: Balances Locks (r:1 w:1)
- /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
- /// Storage: Balances Freezes (r:1 w:0)
- /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+ /// Storage: `Elections::Candidates` (r:1 w:0)
+ /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Elections::Members` (r:1 w:0)
+ /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Elections::RunnersUp` (r:1 w:0)
+ /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Elections::Voting` (r:1 w:1)
+ /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Balances::Locks` (r:1 w:1)
+ /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Freezes` (r:1 w:0)
+ /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
 /// The range of component `v` is `[1, 16]`.
 fn vote_equal(v: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `403 + v * (80 Β±0)`
 // Estimated: `4764 + v * (80 Β±0)`
- // Minimum execution time: 33_028_000 picoseconds.
- Weight::from_parts(34_073_914, 4764) - // Standard Error: 3_474 - .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) + // Minimum execution time: 29_390_000 picoseconds. + Weight::from_parts(30_525_476, 4764) + // Standard Error: 3_185 + .saturating_add(Weight::from_parts(143_073, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + v * (80 Β±0)` // Estimated: `4764 + v * (80 Β±0)` - // Minimum execution time: 45_725_000 picoseconds. - Weight::from_parts(47_169_586, 4764) - // Standard Error: 5_148 - .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) + // Minimum execution time: 39_765_000 picoseconds. 
+ Weight::from_parts(41_374_102, 4764) + // Standard Error: 4_310 + .saturating_add(Weight::from_parts(153_015, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 Β±0)` // Estimated: `4764 + v * (80 Β±0)` - // Minimum execution time: 45_519_000 picoseconds. - Weight::from_parts(47_339_108, 4764) - // Standard Error: 5_501 - .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) + // Minimum execution time: 39_647_000 picoseconds. 
+ Weight::from_parts(41_474_523, 4764) + // Standard Error: 5_503 + .saturating_add(Weight::from_parts(149_029, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 50_386_000 picoseconds. - Weight::from_parts(51_378_000, 4764) + // Minimum execution time: 41_882_000 picoseconds. + Weight::from_parts(42_794_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1570 + c * (48 Β±0)` // Estimated: `3055 + c * (48 Β±0)` - // Minimum execution time: 38_987_000 picoseconds. - Weight::from_parts(41_302_276, 3055) - // Standard Error: 2_047 - .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) + // Minimum execution time: 33_719_000 picoseconds. + Weight::from_parts(35_017_073, 3055) + // Standard Error: 1_587 + .saturating_add(Weight::from_parts(121_130, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `285 + c * (48 Β±0)` // Estimated: `1770 + c * (48 Β±0)` - // Minimum execution time: 33_510_000 picoseconds. - Weight::from_parts(34_947_760, 1770) - // Standard Error: 1_781 - .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) + // Minimum execution time: 27_263_000 picoseconds. + Weight::from_parts(28_215_666, 1770) + // Standard Error: 1_196 + .saturating_add(Weight::from_parts(86_804, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` - // Estimated: `3385` - // Minimum execution time: 50_603_000 picoseconds. - Weight::from_parts(51_715_000, 3385) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 41_531_000 picoseconds. + Weight::from_parts(42_937_000, 3418) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 33_441_000 picoseconds. - Weight::from_parts(34_812_000, 2365) + // Minimum execution time: 27_680_000 picoseconds. 
+ Weight::from_parts(28_810_000, 2365) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn remove_member_without_replacement() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -232,87 +231,90 @@ impl WeightInfo for SubstrateWeight { // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 57_289_000 picoseconds. - Weight::from_parts(58_328_000, 3593) + // Minimum execution time: 45_333_000 picoseconds. 
+ Weight::from_parts(46_523_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Elections Voting (r:513 w:512) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:512 w:512) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:512 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:512 w:512) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:257 w:256) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:256 w:256) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:256 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:256 w:256) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1149 + v * (811 Β±0)` - // Estimated: `4621 + v * (3774 Β±0)` - // Minimum execution time: 18_774_231_000 picoseconds. - Weight::from_parts(18_933_040_000, 4621) - // Standard Error: 301_534 - .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) + // Measured: `0 + d * (818 Β±0) + v * (57 Β±0)` + // Estimated: `24906 + d * (3774 Β±1) + v * (24 Β±0)` + // Minimum execution time: 5_620_000 picoseconds. 
+ Weight::from_parts(5_817_000, 24906) + // Standard Error: 18_357 + .saturating_add(Weight::from_parts(106_164, 0).saturating_mul(v.into())) + // Standard Error: 39_980 + .saturating_add(Weight::from_parts(52_456_337, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(d.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:513 w:0) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:44 w:44) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections ElectionRounds (r:1 w:1) - /// Proof Skipped: Elections ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:513 w:0) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:44 w:44) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::ElectionRounds` (r:1 w:1) + /// Proof: `Elections::ElectionRounds` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. /// The range of component `v` is `[1, 512]`. /// The range of component `e` is `[512, 8192]`. 
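	/// The generated formula below reads as a base `Weight::from_parts(ref_time, proof_size)`
	/// plus a per-unit slope for each measured component within the ranges above (here `v` and
	/// `e`), plus database reads and writes that likewise scale with the component values.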
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 Β±0) + v * (606 Β±0)` - // Estimated: `178887 + c * (2135 Β±7) + e * (12 Β±0) + v * (2653 Β±6)` - // Minimum execution time: 1_281_877_000 picoseconds. - Weight::from_parts(1_288_147_000, 178887) - // Standard Error: 528_851 - .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) - // Standard Error: 33_932 - .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 Β±7) + e * (12 Β±0) + v * (2653 Β±6)` + // Minimum execution time: 1_082_582_000 picoseconds. + Weight::from_parts(1_084_730_000, 178920) + // Standard Error: 594_096 + .saturating_add(Weight::from_parts(19_096_288, 0).saturating_mul(v.into())) + // Standard Error: 38_118 + .saturating_add(Weight::from_parts(792_945, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) @@ -324,164 +326,164 @@ impl WeightInfo for SubstrateWeight { } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 Β±0)` // Estimated: `4764 + v * (80 Β±0)` - // Minimum execution time: 33_028_000 picoseconds. - Weight::from_parts(34_073_914, 4764) - // Standard Error: 3_474 - .saturating_add(Weight::from_parts(205_252, 0).saturating_mul(v.into())) + // Minimum execution time: 29_390_000 picoseconds. 
+ Weight::from_parts(30_525_476, 4764) + // Standard Error: 3_185 + .saturating_add(Weight::from_parts(143_073, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `371 + v * (80 Β±0)` // Estimated: `4764 + v * (80 Β±0)` - // Minimum execution time: 45_725_000 picoseconds. - Weight::from_parts(47_169_586, 4764) - // Standard Error: 5_148 - .saturating_add(Weight::from_parts(213_742, 0).saturating_mul(v.into())) + // Minimum execution time: 39_765_000 picoseconds. 
+ Weight::from_parts(41_374_102, 4764) + // Standard Error: 4_310 + .saturating_add(Weight::from_parts(153_015, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + v * (80 Β±0)` // Estimated: `4764 + v * (80 Β±0)` - // Minimum execution time: 45_519_000 picoseconds. - Weight::from_parts(47_339_108, 4764) - // Standard Error: 5_501 - .saturating_add(Weight::from_parts(195_247, 0).saturating_mul(v.into())) + // Minimum execution time: 39_647_000 picoseconds. 
+ Weight::from_parts(41_474_523, 4764) + // Standard Error: 5_503 + .saturating_add(Weight::from_parts(149_029, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) } - /// Storage: Elections Voting (r:1 w:1) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:1 w:1) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: // Measured: `925` // Estimated: `4764` - // Minimum execution time: 50_386_000 picoseconds. - Weight::from_parts(51_378_000, 4764) + // Minimum execution time: 41_882_000 picoseconds. + Weight::from_parts(42_794_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1570 + c * (48 Β±0)` // Estimated: `3055 + c * (48 Β±0)` - // Minimum execution time: 38_987_000 picoseconds. - Weight::from_parts(41_302_276, 3055) - // Standard Error: 2_047 - .saturating_add(Weight::from_parts(125_200, 0).saturating_mul(c.into())) + // Minimum execution time: 33_719_000 picoseconds. + Weight::from_parts(35_017_073, 3055) + // Standard Error: 1_587 + .saturating_add(Weight::from_parts(121_130, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `285 + c * (48 Β±0)` // Estimated: `1770 + c * (48 Β±0)` - // Minimum execution time: 33_510_000 picoseconds. - Weight::from_parts(34_947_760, 1770) - // Standard Error: 1_781 - .saturating_add(Weight::from_parts(78_851, 0).saturating_mul(c.into())) + // Minimum execution time: 27_263_000 picoseconds. + Weight::from_parts(28_215_666, 1770) + // Standard Error: 1_196 + .saturating_add(Weight::from_parts(86_804, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` - // Estimated: `3385` - // Minimum execution time: 50_603_000 picoseconds. - Weight::from_parts(51_715_000, 3385) + // Measured: `1933` + // Estimated: `3418` + // Minimum execution time: 41_531_000 picoseconds. + Weight::from_parts(42_937_000, 3418) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: // Measured: `880` // Estimated: `2365` - // Minimum execution time: 33_441_000 picoseconds. - Weight::from_parts(34_812_000, 2365) + // Minimum execution time: 27_680_000 picoseconds. 
+ Weight::from_parts(28_810_000, 2365) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn remove_member_without_replacement() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -489,87 +491,90 @@ impl WeightInfo for () { // Minimum execution time: 2_000_000_000_000 picoseconds. Weight::from_parts(2_000_000_000_000, 0) } - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:1 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1900` + // Measured: `1933` // Estimated: `3593` - // Minimum execution time: 57_289_000 picoseconds. - Weight::from_parts(58_328_000, 3593) + // Minimum execution time: 45_333_000 picoseconds. 
+ Weight::from_parts(46_523_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Elections Voting (r:513 w:512) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:0) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Candidates (r:1 w:0) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:512 w:512) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:512 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: System Account (r:512 w:512) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Elections::Voting` (r:257 w:256) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:0) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Candidates` (r:1 w:0) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Locks` (r:256 w:256) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:256 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:256 w:256) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `v` is `[256, 512]`. /// The range of component `d` is `[0, 256]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1149 + v * (811 Β±0)` - // Estimated: `4621 + v * (3774 Β±0)` - // Minimum execution time: 18_774_231_000 picoseconds. - Weight::from_parts(18_933_040_000, 4621) - // Standard Error: 301_534 - .saturating_add(Weight::from_parts(44_306_903, 0).saturating_mul(v.into())) + // Measured: `0 + d * (818 Β±0) + v * (57 Β±0)` + // Estimated: `24906 + d * (3774 Β±1) + v * (24 Β±0)` + // Minimum execution time: 5_620_000 picoseconds. 
+ Weight::from_parts(5_817_000, 24906) + // Standard Error: 18_357 + .saturating_add(Weight::from_parts(106_164, 0).saturating_mul(v.into())) + // Standard Error: 39_980 + .saturating_add(Weight::from_parts(52_456_337, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(d.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(v.into())) } - /// Storage: Elections Candidates (r:1 w:1) - /// Proof Skipped: Elections Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:1) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections RunnersUp (r:1 w:1) - /// Proof Skipped: Elections RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Elections Voting (r:513 w:0) - /// Proof Skipped: Elections Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:44 w:44) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Elections ElectionRounds (r:1 w:1) - /// Proof Skipped: Elections ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Elections::Candidates` (r:1 w:1) + /// Proof: `Elections::Candidates` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:1) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::RunnersUp` (r:1 w:1) + /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Voting` (r:513 w:0) + /// Proof: `Elections::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:0) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:44 w:44) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Elections::ElectionRounds` (r:1 w:1) + /// Proof: `Elections::ElectionRounds` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Members` (r:0 w:1) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Prime` (r:0 w:1) + /// Proof: `Council::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 64]`. /// The range of component `v` is `[1, 512]`. /// The range of component `e` is `[512, 8192]`. 
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 Β±0) + v * (606 Β±0)` - // Estimated: `178887 + c * (2135 Β±7) + e * (12 Β±0) + v * (2653 Β±6)` - // Minimum execution time: 1_281_877_000 picoseconds. - Weight::from_parts(1_288_147_000, 178887) - // Standard Error: 528_851 - .saturating_add(Weight::from_parts(17_761_407, 0).saturating_mul(v.into())) - // Standard Error: 33_932 - .saturating_add(Weight::from_parts(698_277, 0).saturating_mul(e.into())) + // Estimated: `178920 + c * (2135 Β±7) + e * (12 Β±0) + v * (2653 Β±6)` + // Minimum execution time: 1_082_582_000 picoseconds. + Weight::from_parts(1_084_730_000, 178920) + // Standard Error: 594_096 + .saturating_add(Weight::from_parts(19_096_288, 0).saturating_mul(v.into())) + // Standard Error: 38_118 + .saturating_add(Weight::from_parts(792_945, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 12cadc969fd..94b2f276e00 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -46,9 +46,10 @@ //! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, //! as demonstrated by [`Call::set_dummy`]. //! - A private function that performs a storage update. -//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which -//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction -//! with an encoded length higher than 200 bytes. +//! - A simple transaction extension implementation (see: +//! [`sp_runtime::traits::TransactionExtension`]) which increases the priority of the +//! [`Call::set_dummy`] if it's present and drops any transaction with an encoded length higher +//! than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -64,10 +65,12 @@ use frame_system::ensure_signed; use log::info; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + Bounded, DispatchInfoOf, OriginOf, SaturatedConversion, Saturating, TransactionExtension, + TransactionExtensionBase, ValidateResult, }, + transaction_validity::{InvalidTransaction, ValidTransaction}, }; use sp_std::vec::Vec; @@ -440,8 +443,8 @@ impl Pallet { // Similar to other FRAME pallets, your pallet can also define a signed extension and perform some // checks and [pre/post]processing [before/after] the transaction. A signed extension can be any -// decodable type that implements `SignedExtension`. See the trait definition for the full list of -// bounds. As a convention, you can follow this approach to create an extension for your pallet: +// decodable type that implements `TransactionExtension`. See the trait definition for the full list +// of bounds. As a convention, you can follow this approach to create an extension for your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` // (needed for the compiler to accept `T: Config`) will suffice. 
// - Otherwise, create a tuple struct which contains the external data. Of course, for the entire @@ -455,18 +458,18 @@ impl Pallet { // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by // default, an extension is applied to all `Call` functions (i.e. all transactions). the `Call` enum -// variant is given to each function of `SignedExtension`. Hence, you can filter based on pallet or -// a particular call if needed. +// variant is given to each function of `TransactionExtension`. Hence, you can filter based on +// pallet or a particular call if needed. // // Some extra information, such as encoded length, some static dispatch info like weight and the // sender of the transaction (if signed) are also provided. // // The full list of hooks that can be added to a signed extension can be found -// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). +// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.TransactionExtension.html). // // The signed extensions are aggregated in the runtime file of a substrate chain. All extensions // should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` -// types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and +// types defined in the runtime. Lookup `pub type TxExtension = (...)` in `node/runtime` and // `node-template` for an example of this. /// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the @@ -484,52 +487,45 @@ impl core::fmt::Debug for WatchDummy { } } -impl SignedExtension for WatchDummy +impl TransactionExtensionBase for WatchDummy { + const IDENTIFIER: &'static str = "WatchDummy"; + type Implicit = (); +} +impl + TransactionExtension<::RuntimeCall, Context> for WatchDummy where ::RuntimeCall: IsSubType>, { - const IDENTIFIER: &'static str = "WatchDummy"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: OriginOf<::RuntimeCall>, + call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult::RuntimeCall> { // if the transaction is too big, just drop it. if len > 200 { - return InvalidTransaction::ExhaustsResources.into() + return Err(InvalidTransaction::ExhaustsResources.into()) } // check for `set_dummy` - match call.is_sub_type() { + let validity = match call.is_sub_type() { Some(Call::set_dummy { .. 
}) => { sp_runtime::print("set_dummy was received."); let valid_tx = ValidTransaction { priority: Bounded::max_value(), ..Default::default() }; - Ok(valid_tx) + valid_tx }, - _ => Ok(Default::default()), - } + _ => Default::default(), + }; + Ok((validity, (), origin)) } + impl_tx_ext_default!(::RuntimeCall; Context; prepare); } diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index 207e46e428d..e460ad0992f 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -27,7 +27,7 @@ use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, DispatchTransaction, IdentityLookup}, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. @@ -158,13 +158,16 @@ fn signed_ext_watch_dummy_works() { assert_eq!( WatchDummy::(PhantomData) - .validate(&1, &call, &info, 150) + .validate_only(Some(1).into(), &call, &info, 150) .unwrap() + .0 .priority, u64::MAX, ); assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + WatchDummy::(PhantomData) + .validate_only(Some(1).into(), &call, &info, 250) + .unwrap_err(), InvalidTransaction::ExhaustsResources.into(), ); }) diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index ea37a2da493..81f2a40840b 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -30,7 +30,7 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ - testing::TestXt, + generic::UncheckedExtrinsic, traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; @@ -73,7 +73,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -type Extrinsic = TestXt; +type Extrinsic = UncheckedExtrinsic; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -99,7 +99,7 @@ where _account: AccountId, nonce: u64, ) -> Option<(RuntimeCall, ::SignaturePayload)> { - Some((call, (nonce, ()))) + Some((call, (nonce, (), ()))) } } @@ -219,8 +219,8 @@ fn should_submit_signed_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); + assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), ()))); + assert_eq!(tx.function, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -259,11 +259,11 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -313,11 +313,11 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { // then let tx = 
pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -353,9 +353,9 @@ fn should_submit_raw_unsigned_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); assert_eq!( - tx.call, + tx.function, RuntimeCall::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 diff --git a/substrate/frame/examples/tasks/src/weights.rs b/substrate/frame/examples/tasks/src/weights.rs index 793af6e9622..c9ddea6f9a8 100644 --- a/substrate/frame/examples/tasks/src/weights.rs +++ b/substrate/frame/examples/tasks/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for `pallet_example_tasks` +//! Autogenerated weights for `tasks_example` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook.local`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/production/substrate-node // benchmark // pallet -// --chain -// dev -// --pallet -// pallet_example_tasks -// --extrinsic -// * -// --steps -// 20 -// --repeat -// 10 -// --output -// frame/examples/tasks/src/weights.rs +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=tasks_example +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/examples/tasks/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,37 +49,42 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_template. +/// Weight functions needed for `tasks_example`. pub trait WeightInfo { fn add_number_into_total() -> Weight; } -/// Weight functions for `pallet_example_kitchensink`. +/// Weights for `tasks_example` using the Substrate node and recommended hardware. 
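/// FRAME pallets conventionally consume a generated trait like this through a `WeightInfo`
/// associated type on their `Config`, wired up in the runtime as, e.g.,
/// `type WeightInfo = SubstrateWeight<Runtime>;` (noted here only as the usual convention,
/// not as the exact type path used by this example).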
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index 70b55f6e855..a3f70a9fc3c 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -327,10 +327,42 @@ impl frame_system::Config for Runtime { type Balance = u64; +pub struct BalancesWeights; +impl pallet_balances::WeightInfo for BalancesWeights { + fn transfer_allow_death() -> Weight { + Weight::from_parts(25, 0) + } + fn transfer_keep_alive() -> Weight { + Weight::zero() + } + fn force_set_balance_creating() -> Weight { + Weight::zero() + } + fn force_set_balance_killing() -> Weight { + Weight::zero() + } + fn force_transfer() -> Weight { + Weight::zero() + } + fn transfer_all() -> Weight { + Weight::zero() + } + fn force_unreserve() -> Weight { + Weight::zero() + } + fn upgrade_accounts(_u: u32) -> Weight { + Weight::zero() + } + fn force_adjust_total_issuance() -> Weight { + Weight::zero() + } +} + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type WeightInfo = BalancesWeights; } parameter_types! 
{ @@ -343,6 +375,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); + type WeightInfo = (); } impl custom::Config for Runtime {} @@ -355,19 +388,46 @@ impl frame_support::traits::Get for RuntimeVersion { } } +#[derive(Clone, Debug, Encode, codec::Decode, PartialEq, Eq, scale_info::TypeInfo)] +pub struct AccountU64(u64); +impl sp_runtime::traits::IdentifyAccount for AccountU64 { + type AccountId = u64; + fn into_account(self) -> u64 { + self.0 + } +} + +impl sp_runtime::traits::Verify for AccountU64 { + type Signer = AccountU64; + fn verify>( + &self, + _msg: L, + _signer: &::AccountId, + ) -> bool { + true + } +} + +impl From for AccountU64 { + fn from(value: u64) -> Self { + Self(value) + } +} + parameter_types! { pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = Default::default(); } -type SignedExtra = ( +type TxExtension = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); -type TestXt = sp_runtime::testing::TestXt; -type TestBlock = Block; +type UncheckedXt = + sp_runtime::generic::UncheckedExtrinsic; +type TestBlock = Block; // Will contain `true` when the custom runtime logic was called. const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; @@ -387,7 +447,7 @@ impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { type Executive = super::Executive< Runtime, - Block, + Block, ChainContext, Runtime, AllPalletsWithSystem, @@ -462,17 +522,14 @@ impl MultiStepMigrator for MockedModeGetter { } } -fn extra(nonce: u64, fee: Balance) -> SignedExtra { +fn tx_ext(nonce: u64, fee: Balance) -> TxExtension { ( frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(fee), ) -} - -fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) + .into() } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { @@ -485,7 +542,7 @@ fn balance_transfer_dispatch_works() { pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } .assimilate_storage(&mut t) .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); + let xt = UncheckedXt::new_signed(call_transfer(2, 69), 1, 1.into(), tx_ext(0, 0)); let weight = xt.get_dispatch_info().weight + ::BlockWeights::get() .get(DispatchClass::Normal) @@ -596,7 +653,7 @@ fn block_import_of_bad_extrinsic_root_fails() { fn bad_extrinsic_not_inserted() { let mut t = new_test_ext(1); // bad nonce check! 
- let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); + let xt = UncheckedXt::new_signed(call_transfer(33, 69), 1, 1.into(), tx_ext(30, 0)); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); assert_err!( @@ -610,27 +667,24 @@ fn bad_extrinsic_not_inserted() { #[test] fn block_weight_limit_enforced() { let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; + let transfer_weight = + <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + let num_to_exhaust_block = limit.ref_time() / (transfer_weight.ref_time() + 5); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); // Base block execution weight + `on_initialize` weight from the custom module. assert_eq!(>::block_weight().total(), base_block_weight); for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, nonce.into(), 0), + 1, + 1.into(), + tx_ext(nonce.into(), 0), ); let res = Executive::apply_extrinsic(xt); if nonce != num_to_exhaust_block { @@ -638,7 +692,8 @@ fn block_weight_limit_enforced() { assert_eq!( >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight - Weight::from_parts((encoded_len + 5) * (nonce + 1), 0) + base_block_weight, + Weight::from_parts((transfer_weight.ref_time() + 5) * (nonce + 1), 0) + + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -653,19 +708,26 @@ fn block_weight_limit_enforced() { #[test] fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let x1 = TestXt::new( + let x1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), + 1, + 1.into(), + tx_ext(1, 0), ); - let x2 = TestXt::new( + let x2 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), + 1, + 1.into(), + tx_ext(2, 0), ); let len = xt.clone().encode().len() as u32; + let transfer_weight = <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module @@ -681,8 +743,7 @@ fn block_weight_and_size_is_stored_per_tx() { assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_parts(len as u64, 0) + + let extrinsic_weight = transfer_weight + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -707,8 +768,8 @@ fn block_weight_and_size_is_stored_per_tx() { #[test] fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let valid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::allowed_unsigned {})); + let invalid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::unallowed_unsigned {})); let mut t = new_test_ext(1); t.execute_with(|| { @@ -745,9 +806,11 @@ fn can_not_pay_for_tx_fee_on_full_lock() { t.execute_with(|| { as fungible::MutateFreeze>::set_freeze(&(), &1, 110) .unwrap(); - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); Executive::initialize_block(&Header::new_from_number(1)); @@ -872,9 +935,11 @@ fn event_from_runtime_upgrade_is_included() { /// used through the `ExecuteBlock` trait. #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); let header = new_test_ext(1).execute_with(|| { @@ -902,7 +967,10 @@ fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); - >>::execute_block(Block::new(header, vec![xt])); + >>::execute_block(Block::new( + header, + vec![xt], + )); assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); @@ -968,7 +1036,7 @@ fn offchain_worker_works_as_expected() { #[test] fn calculating_storage_root_twice_works() { let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); + let xt = UncheckedXt::new_signed(call, 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -987,11 +1055,13 @@ fn calculating_storage_root_twice_works() { #[test] #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( + let xt1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1010,8 +1080,8 @@ fn invalid_inherent_position_fail() { #[test] fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. 
@@ -1031,7 +1101,12 @@ fn valid_inherents_position_works() { #[test] #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] fn invalid_inherents_fail_block_execution() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom(custom::Call::inherent {}), + 1, + 1.into(), + tx_ext(0, 0), + ); new_test_ext(1).execute_with(|| { Executive::execute_block(Block::new( @@ -1044,7 +1119,7 @@ fn invalid_inherents_fail_block_execution() { // Inherents are created by the runtime and don't need to be validated. #[test] fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); new_test_ext(1).execute_with(|| { assert_eq!( @@ -1058,7 +1133,7 @@ fn inherents_fail_validate_block() { /// Inherents still work while `initialize_block` forbids transactions. #[test] fn inherents_ok_while_exts_forbidden_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1078,8 +1153,8 @@ fn inherents_ok_while_exts_forbidden_works() { #[test] #[should_panic = "Only inherents are allowed in this block"] fn transactions_in_only_inherents_block_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1099,8 +1174,8 @@ fn transactions_in_only_inherents_block_errors() { /// Same as above but no error. 
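/// (As throughout these tests, inherents are built with `UncheckedXt::new_bare(call)` while
/// ordinary transactions use `UncheckedXt::new_signed(call, signer, signature, tx_ext(nonce, fee))`.)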
#[test] fn transactions_in_normal_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1120,8 +1195,8 @@ fn transactions_in_normal_block_works() { #[test] #[cfg(feature = "try-runtime")] fn try_execute_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1148,8 +1223,8 @@ fn try_execute_block_works() { #[cfg(feature = "try-runtime")] #[should_panic = "Only inherents allowed"] fn try_execute_tx_forbidden_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1176,9 +1251,9 @@ fn try_execute_tx_forbidden_errors() { /// Check that `ensure_inherents_are_first` reports the correct indices. #[test] fn ensure_inherents_are_first_works() { - let in1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let in2 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let in2 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); // Mocked empty header: let header = new_test_ext(1).execute_with(|| { @@ -1256,18 +1331,20 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { for i in 0..n_in { let xt = if i % 2 == 0 { - TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})) } else { - TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})) }; Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); extrinsics.push(xt); } for t in 0..n_tx { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Custom2(custom2::Call::some_call {}), - sign_extra(1, t as u64, 0), + 1, + 1.into(), + tx_ext(t as u64, 0), ); // Extrinsics can be applied even when MBMs are active. Only the `execute_block` // will reject it. 
@@ -1307,8 +1384,13 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { #[test] fn post_inherent_called_after_all_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1342,8 +1424,13 @@ fn post_inherent_called_after_all_inherents() { /// Regression test for AppSec finding #40. #[test] fn post_inherent_called_after_all_optional_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1376,14 +1463,14 @@ fn post_inherent_called_after_all_optional_inherents() { #[test] fn is_inherent_works() { - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let ext = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); assert!(!Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {})); assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); } diff --git a/substrate/frame/fast-unstake/src/weights.rs b/substrate/frame/fast-unstake/src/weights.rs index 9c25a409f74..d783ba921bf 100644 --- a/substrate/frame/fast-unstake/src/weights.rs +++ b/substrate/frame/fast-unstake/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_fast_unstake +//! Autogenerated weights for `pallet_fast_unstake` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/fast-unstake/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/fast-unstake/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_fast_unstake. +/// Weight functions needed for `pallet_fast_unstake`. pub trait WeightInfo { fn on_idle_unstake(b: u32, ) -> Weight; fn on_idle_check(v: u32, b: u32, ) -> Weight; @@ -59,301 +58,305 @@ pub trait WeightInfo { fn control() -> Weight; } -/// Weights for pallet_fast_unstake using the Substrate node and recommended hardware. +/// Weights for `pallet_fast_unstake` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:64 w:0) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Bonded (r:64 w:64) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:64 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:64 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: System Account (r:64 w:64) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:64 w:64) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:64 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:64) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee 
(r:0 w:64) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:64 w:0) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:64 w:64) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:64 w:64) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:64 w:64) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:64 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:64 w:64) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:64 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:64 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:64) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1378 + b * (343 Β±0)` + // Measured: `1475 + b * (452 Β±0)` // Estimated: `7253 + b * (3774 Β±0)` - // Minimum execution time: 92_847_000 picoseconds. - Weight::from_parts(42_300_813, 7253) - // Standard Error: 40_514 - .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) + // Minimum execution time: 89_005_000 picoseconds. 
+ Weight::from_parts(50_257_055, 7253) + // Standard Error: 68_836 + .saturating_add(Weight::from_parts(57_329_950, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:257 w:0) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakers` (r:1 w:0) + /// Proof: `Staking::ErasStakers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:257 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1546 + b * (48 Β±0) + v * (10037 Β±0)` - // Estimated: `7253 + b * (49 Β±0) + v * (12513 Β±0)` - // Minimum execution time: 1_685_784_000 picoseconds. 
- Weight::from_parts(1_693_370_000, 7253) - // Standard Error: 13_295_842 - .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) - // Standard Error: 53_198_180 - .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Measured: `1879 + b * (55 Β±0) + v * (10055 Β±0)` + // Estimated: `7253 + b * (56 Β±0) + v * (12531 Β±0)` + // Minimum execution time: 1_737_131_000 picoseconds. + Weight::from_parts(1_746_770_000, 7253) + // Standard Error: 13_401_143 + .saturating_add(Weight::from_parts(426_946_450, 0).saturating_mul(v.into())) + // Standard Error: 53_619_501 + .saturating_add(Weight::from_parts(1_664_681_508, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 56).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12531).saturating_mul(v.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: 
`FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 125_512_000 picoseconds. - Weight::from_parts(129_562_000, 7253) + // Minimum execution time: 112_632_000 picoseconds. 
+ Weight::from_parts(117_267_000, 7253) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1223` + // Measured: `1251` // Estimated: `7253` - // Minimum execution time: 43_943_000 picoseconds. - Weight::from_parts(45_842_000, 7253) + // Minimum execution time: 39_253_000 picoseconds. + Weight::from_parts(40_053_000, 7253) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:0 w:1) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn control() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_677_000 picoseconds. - Weight::from_parts(2_849_000, 0) + // Minimum execution time: 2_386_000 picoseconds. + Weight::from_parts(2_508_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking SlashingSpans (r:64 w:0) - /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) - /// Storage: Staking Bonded (r:64 w:64) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:64 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:64 w:0) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: System Account (r:64 w:64) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:64 w:64) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:64 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:0 w:64) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Staking Payee (r:0 w:64) - /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:64 w:0) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:64 w:64) + /// 
Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:64 w:64) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:64 w:64) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:64 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:64 w:64) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:64 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:64 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:64) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1378 + b * (343 Β±0)` + // Measured: `1475 + b * (452 Β±0)` // Estimated: `7253 + b * (3774 Β±0)` - // Minimum execution time: 92_847_000 picoseconds. - Weight::from_parts(42_300_813, 7253) - // Standard Error: 40_514 - .saturating_add(Weight::from_parts(58_412_402, 0).saturating_mul(b.into())) + // Minimum execution time: 89_005_000 picoseconds. + Weight::from_parts(50_257_055, 7253) + // Standard Error: 68_836 + .saturating_add(Weight::from_parts(57_329_950, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((8_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ValidatorCount (r:1 w:0) - /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:1) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:0) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) - /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking ErasStakers (r:257 w:0) - /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:1) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) + /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakers` (r:1 w:0) + /// Proof: `Staking::ErasStakers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:257 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1546 + b * (48 Β±0) + v * (10037 Β±0)` - // Estimated: `7253 + b * (49 Β±0) + v * (12513 Β±0)` - // Minimum execution time: 1_685_784_000 picoseconds. - Weight::from_parts(1_693_370_000, 7253) - // Standard Error: 13_295_842 - .saturating_add(Weight::from_parts(425_349_148, 0).saturating_mul(v.into())) - // Standard Error: 53_198_180 - .saturating_add(Weight::from_parts(1_673_328_444, 0).saturating_mul(b.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Measured: `1879 + b * (55 Β±0) + v * (10055 Β±0)` + // Estimated: `7253 + b * (56 Β±0) + v * (12531 Β±0)` + // Minimum execution time: 1_737_131_000 picoseconds. 
+ Weight::from_parts(1_746_770_000, 7253) + // Standard Error: 13_401_143 + .saturating_add(Weight::from_parts(426_946_450, 0).saturating_mul(v.into())) + // Standard Error: 53_619_501 + .saturating_add(Weight::from_parts(1_664_681_508, 0).saturating_mul(b.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 12513).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(0, 56).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 12531).saturating_mul(v.into())) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:1) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: Staking Bonded (r:1 w:0) - /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) - /// Storage: Staking Validators (r:1 w:0) - /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) - /// Storage: Staking Nominators (r:1 w:1) - /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) - /// Storage: Staking CounterForNominators (r:1 w:1) - /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: VoterList ListNodes (r:1 w:1) - /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) - /// Storage: VoterList ListBags (r:1 w:1) - /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) - /// Storage: VoterList CounterForListNodes (r:1 w:1) - /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking CurrentEra (r:1 w:0) - /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// 
Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` + // Measured: `1955` // Estimated: `7253` - // Minimum execution time: 125_512_000 picoseconds. - Weight::from_parts(129_562_000, 7253) + // Minimum execution time: 112_632_000 picoseconds. 
+ Weight::from_parts(117_267_000, 7253) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: FastUnstake Queue (r:1 w:1) - /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) - /// Storage: FastUnstake Head (r:1 w:0) - /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(5768), added: 6263, mode: MaxEncodedLen) - /// Storage: FastUnstake CounterForQueue (r:1 w:1) - /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Queue` (r:1 w:1) + /// Proof: `FastUnstake::Queue` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::Head` (r:1 w:0) + /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(5768), added: 6263, mode: `MaxEncodedLen`) + /// Storage: `FastUnstake::CounterForQueue` (r:1 w:1) + /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1223` + // Measured: `1251` // Estimated: `7253` - // Minimum execution time: 43_943_000 picoseconds. - Weight::from_parts(45_842_000, 7253) + // Minimum execution time: 39_253_000 picoseconds. + Weight::from_parts(40_053_000, 7253) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) - /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:0 w:1) + /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn control() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_677_000 picoseconds. - Weight::from_parts(2_849_000, 0) + // Minimum execution time: 2_386_000 picoseconds. + Weight::from_parts(2_508_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/glutton/src/weights.rs b/substrate/frame/glutton/src/weights.rs index cbc0fb022f5..b2e28dc488a 100644 --- a/substrate/frame/glutton/src/weights.rs +++ b/substrate/frame/glutton/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_glutton +//! Autogenerated weights for `pallet_glutton` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/glutton/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/glutton/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_glutton. +/// Weight functions needed for `pallet_glutton`. pub trait WeightInfo { fn initialize_pallet_grow(n: u32, ) -> Weight; fn initialize_pallet_shrink(n: u32, ) -> Weight; @@ -63,39 +62,39 @@ pub trait WeightInfo { fn set_storage() -> Weight; } -/// Weights for pallet_glutton using the Substrate node and recommended hardware. +/// Weights for `pallet_glutton` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1489` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(93_073_710, 1489) - // Standard Error: 22_390 - .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) + // Minimum execution time: 8_443_000 picoseconds. 
+ Weight::from_parts(103_698_651, 1489) + // Standard Error: 21_777 + .saturating_add(Weight::from_parts(9_529_476, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_378_000 picoseconds. - Weight::from_parts(5_591_508, 1489) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) + // Minimum execution time: 8_343_000 picoseconds. + Weight::from_parts(304_498, 1489) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(1_146_553, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -105,119 +104,119 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(990_745, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) + // Minimum execution time: 656_000 picoseconds. + Weight::from_parts(1_875_128, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(103_381, 0).saturating_mul(i.into())) } - /// Storage: Glutton TrashData (r:5000 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119114 + i * (1022 Β±0)` // Estimated: `990 + i * (3016 Β±0)` - // Minimum execution time: 435_000 picoseconds. - Weight::from_parts(66_547_542, 990) - // Standard Error: 4_557 - .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) + // Minimum execution time: 454_000 picoseconds. 
+ Weight::from_parts(521_000, 990) + // Standard Error: 1_940 + .saturating_add(Weight::from_parts(5_729_831, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:1737 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 67_699_845_000 picoseconds. - Weight::from_parts(67_893_204_000, 5239782) + // Minimum execution time: 55_403_909_000 picoseconds. + Weight::from_parts(55_472_412_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:5 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 122_297_527_000 picoseconds. - Weight::from_parts(122_394_818_000, 16070) + // Minimum execution time: 97_959_007_000 picoseconds. 
+ Weight::from_parts(98_030_476_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1493` - // Minimum execution time: 5_882_000 picoseconds. - Weight::from_parts(6_138_000, 1493) + // Minimum execution time: 5_011_000 picoseconds. + Weight::from_parts(5_183_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_830_000 picoseconds. - Weight::from_parts(8_366_000, 0) + // Minimum execution time: 5_591_000 picoseconds. + Weight::from_parts(5_970_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_933_000 picoseconds. - Weight::from_parts(8_213_000, 0) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_038_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1489` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(93_073_710, 1489) - // Standard Error: 22_390 - .saturating_add(Weight::from_parts(9_572_012, 0).saturating_mul(n.into())) + // Minimum execution time: 8_443_000 picoseconds. 
+ Weight::from_parts(103_698_651, 1489) + // Standard Error: 21_777 + .saturating_add(Weight::from_parts(9_529_476, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } - /// Storage: Glutton TrashDataCount (r:1 w:1) - /// Proof: Glutton TrashDataCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:0 w:1000) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119` // Estimated: `1489` - // Minimum execution time: 11_378_000 picoseconds. - Weight::from_parts(5_591_508, 1489) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(1_163_758, 0).saturating_mul(n.into())) + // Minimum execution time: 8_343_000 picoseconds. + Weight::from_parts(304_498, 1489) + // Standard Error: 1_568 + .saturating_add(Weight::from_parts(1_146_553, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -227,83 +226,83 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 669_000 picoseconds. - Weight::from_parts(990_745, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(105_224, 0).saturating_mul(i.into())) + // Minimum execution time: 656_000 picoseconds. + Weight::from_parts(1_875_128, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(103_381, 0).saturating_mul(i.into())) } - /// Storage: Glutton TrashData (r:5000 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `119114 + i * (1022 Β±0)` // Estimated: `990 + i * (3016 Β±0)` - // Minimum execution time: 435_000 picoseconds. - Weight::from_parts(66_547_542, 990) - // Standard Error: 4_557 - .saturating_add(Weight::from_parts(5_851_324, 0).saturating_mul(i.into())) + // Minimum execution time: 454_000 picoseconds. 
+ Weight::from_parts(521_000, 990) + // Standard Error: 1_940 + .saturating_add(Weight::from_parts(5_729_831, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:1737 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `1900497` // Estimated: `5239782` - // Minimum execution time: 67_699_845_000 picoseconds. - Weight::from_parts(67_893_204_000, 5239782) + // Minimum execution time: 55_403_909_000 picoseconds. + Weight::from_parts(55_472_412_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton TrashData (r:5 w:0) - /// Proof: Glutton TrashData (max_values: Some(65000), max_size: Some(1036), added: 3016, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: // Measured: `9547` // Estimated: `16070` - // Minimum execution time: 122_297_527_000 picoseconds. - Weight::from_parts(122_394_818_000, 16070) + // Minimum execution time: 97_959_007_000 picoseconds. 
+ Weight::from_parts(98_030_476_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } - /// Storage: Glutton Storage (r:1 w:0) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Glutton Compute (r:1 w:0) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1493` - // Minimum execution time: 5_882_000 picoseconds. - Weight::from_parts(6_138_000, 1493) + // Minimum execution time: 5_011_000 picoseconds. + Weight::from_parts(5_183_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Glutton Compute (r:0 w:1) - /// Proof: Glutton Compute (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_compute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_830_000 picoseconds. - Weight::from_parts(8_366_000, 0) + // Minimum execution time: 5_591_000 picoseconds. + Weight::from_parts(5_970_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Glutton Storage (r:0 w:1) - /// Proof: Glutton Storage (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_933_000 picoseconds. - Weight::from_parts(8_213_000, 0) + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_038_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 5d48f974c31..e1be487dbb7 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -35,11 +35,8 @@ use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; use sp_core::{crypto::KeyTypeId, H256}; use sp_keyring::Ed25519Keyring; use sp_runtime::{ - curve::PiecewiseLinear, - impl_opaque_keys, - testing::{TestXt, UintAuthorityId}, - traits::OpaqueKeys, - BuildStorage, DigestItem, Perbill, + curve::PiecewiseLinear, generic::UncheckedExtrinsic, impl_opaque_keys, + testing::UintAuthorityId, traits::OpaqueKeys, BuildStorage, DigestItem, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; @@ -77,7 +74,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } parameter_types! { diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs index 1feb8252c84..81de520d7f2 100644 --- a/substrate/frame/identity/src/weights.rs +++ b/substrate/frame/identity/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! 
Autogenerated weights for pallet_identity +//! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/identity/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/identity/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_identity. +/// Weight functions needed for `pallet_identity`. pub trait WeightInfo { fn add_registrar(r: u32, ) -> Weight; fn set_identity(r: u32, ) -> Weight; @@ -77,278 +76,274 @@ pub trait WeightInfo { fn remove_dangling_username() -> Weight; } -/// Weights for pallet_identity using the Substrate node and recommended hardware. +/// Weights for `pallet_identity` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 11_683_000 picoseconds. - Weight::from_parts(12_515_830, 2626) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) + // Minimum execution time: 8_857_000 picoseconds. + Weight::from_parts(9_331_464, 2626) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(94_096, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
- fn set_identity(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `442 + r * (5 ±0)`
- // Estimated: `11003`
- // Minimum execution time: 32_949_000 picoseconds.
- Weight::from_parts(31_329_634, 11003)
- // Standard Error: 4_496
- .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into()))
+ fn set_identity(_r: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `6978 + r * (5 ±0)`
+ // Estimated: `11037`
+ // Minimum execution time: 96_522_000 picoseconds.
+ Weight::from_parts(102_738_605, 11037)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: Identity IdentityOf (r:1 w:0)
- /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
- /// Storage: Identity SubsOf (r:1 w:1)
- /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
- /// Storage: Identity SuperOf (r:100 w:100)
- /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
+ /// Storage: `Identity::IdentityOf` (r:1 w:0)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SuperOf` (r:100 w:100)
+ /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`)
 /// The range of component `s` is `[0, 100]`.
 fn set_subs_new(s: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `101`
- // Estimated: `11003 + s * (2589 ±0)`
- // Minimum execution time: 9_157_000 picoseconds.
- Weight::from_parts(24_917_444, 11003)
- // Standard Error: 4_554
- .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into()))
+ // Estimated: `11037 + s * (2589 ±0)`
+ // Minimum execution time: 9_112_000 picoseconds.
+ Weight::from_parts(22_676_873, 11037)
+ // Standard Error: 6_494
+ .saturating_add(Weight::from_parts(3_279_196, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(2_u64))
 .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into())))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into()))
 }
- /// Storage: Identity IdentityOf (r:1 w:0)
- /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
- /// Storage: Identity SubsOf (r:1 w:1)
- /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
- /// Storage: Identity SuperOf (r:0 w:100)
- /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
+ /// Storage: `Identity::IdentityOf` (r:1 w:0)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SuperOf` (r:0 w:100)
+ /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`)
 /// The range of component `p` is `[0, 100]`.
 fn set_subs_old(p: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `194 + p * (32 ±0)`
- // Estimated: `11003`
- // Minimum execution time: 9_240_000 picoseconds.
- Weight::from_parts(23_326_035, 11003)
- // Standard Error: 3_664
- .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into()))
+ // Estimated: `11037`
+ // Minimum execution time: 8_902_000 picoseconds.
+ Weight::from_parts(21_168_031, 11037)
+ // Standard Error: 3_576
+ .saturating_add(Weight::from_parts(1_431_542, 0).saturating_mul(p.into()))
 .saturating_add(T::DbWeight::get().reads(2_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into())))
 }
- /// Storage: Identity SubsOf (r:1 w:1)
- /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
- /// Storage: Identity IdentityOf (r:1 w:1)
- /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
- /// Storage: Identity SuperOf (r:0 w:100)
- /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
+ /// Storage: `Identity::SubsOf` (r:1 w:1)
+ /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::SuperOf` (r:0 w:100)
+ /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`)
 /// The range of component `r` is `[1, 20]`.
 /// The range of component `s` is `[0, 100]`.
- fn clear_identity(r: u32, s: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)`
- // Estimated: `11003`
- // Minimum execution time: 55_687_000 picoseconds.
- Weight::from_parts(30_695_182, 11003)
- // Standard Error: 9_921
- .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into()))
- // Standard Error: 1_937
- .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into()))
+ fn clear_identity(_r: u32, s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `7070 + r * (5 ±0) + s * (32 ±0)`
+ // Estimated: `11037`
+ // Minimum execution time: 52_468_000 picoseconds.
+ Weight::from_parts(56_065_452, 11037)
+ // Standard Error: 2_274
+ .saturating_add(Weight::from_parts(1_399_051, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(2_u64))
 .saturating_add(T::DbWeight::get().writes(2_u64))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 }
- /// Storage: Identity Registrars (r:1 w:0)
- /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
- /// Storage: Identity IdentityOf (r:1 w:1)
- /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
+ /// Storage: `Identity::Registrars` (r:1 w:0)
+ /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`)
+ /// Storage: `Identity::IdentityOf` (r:1 w:1)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
 /// The range of component `r` is `[1, 20]`.
fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 34_876_000 picoseconds. - Weight::from_parts(32_207_018, 11003) - // Standard Error: 5_247 - .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 Β±0)` + // Estimated: `11037` + // Minimum execution time: 67_951_000 picoseconds. + Weight::from_parts(69_591_058, 11037) + // Standard Error: 4_420 + .saturating_add(Weight::from_parts(209_239, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 30_689_000 picoseconds. - Weight::from_parts(31_967_170, 11003) - // Standard Error: 5_387 - .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 67_818_000 picoseconds. + Weight::from_parts(70_356_319, 11037) + // Standard Error: 5_116 + .saturating_add(Weight::from_parts(4_264, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_357_000 picoseconds. - Weight::from_parts(7_932_950, 2626) - // Standard Error: 1_804 - .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) + // Minimum execution time: 6_096_000 picoseconds. + Weight::from_parts(6_470_752, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(94_764, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_437_000 picoseconds. - Weight::from_parts(8_051_889, 2626) - // Standard Error: 1_997 - .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) + // Minimum execution time: 6_278_000 picoseconds. 
+ Weight::from_parts(6_678_997, 2626) + // Standard Error: 1_396 + .saturating_add(Weight::from_parts(87_207, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_385_000 picoseconds. - Weight::from_parts(7_911_589, 2626) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) + // Minimum execution time: 6_130_000 picoseconds. + Weight::from_parts(6_516_146, 2626) + // Standard Error: 1_356 + .saturating_add(Weight::from_parts(88_455, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 24_073_000 picoseconds. - Weight::from_parts(17_817_684, 11003) - // Standard Error: 8_612 - .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 Β±0)` + // Estimated: `11037` + // Minimum execution time: 87_100_000 picoseconds. 
+ Weight::from_parts(87_601_945, 11037) + // Standard Error: 22_724 + .saturating_add(Weight::from_parts(458_296, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 Β±0) + s * (32 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 73_981_000 picoseconds. - Weight::from_parts(51_684_057, 11003) - // Standard Error: 12_662 - .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 Β±0) + s * (32 Β±0)` + // Estimated: `11037` + // Minimum execution time: 65_925_000 picoseconds. + Weight::from_parts(70_786_250, 11037) + // Standard Error: 19_680 + .saturating_add(Weight::from_parts(29_782, 0).saturating_mul(r.into())) + // Standard Error: 3_839 + .saturating_add(Weight::from_parts(1_377_604, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. 
fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 Β±0)` - // Estimated: `11003` - // Minimum execution time: 29_367_000 picoseconds. - Weight::from_parts(34_214_998, 11003) - // Standard Error: 1_522 - .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 25_916_000 picoseconds. + Weight::from_parts(29_781_270, 11037) + // Standard Error: 1_326 + .saturating_add(Weight::from_parts(101_515, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 Β±0)` - // Estimated: `11003` - // Minimum execution time: 12_384_000 picoseconds. - Weight::from_parts(14_417_903, 11003) - // Standard Error: 539 - .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_142_000 picoseconds. + Weight::from_parts(14_179_741, 11037) + // Standard Error: 538 + .saturating_add(Weight::from_parts(38_847, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 Β±0)` - // Estimated: `11003` - // Minimum execution time: 33_327_000 picoseconds. - Weight::from_parts(36_208_941, 11003) - // Standard Error: 1_240 - .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_327_000 picoseconds. 
+ Weight::from_parts(32_163_402, 11037) + // Standard Error: 1_047 + .saturating_add(Weight::from_parts(87_159, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 Β±0)` // Estimated: `6723` - // Minimum execution time: 23_764_000 picoseconds. - Weight::from_parts(26_407_731, 6723) - // Standard Error: 1_025 - .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + // Minimum execution time: 22_380_000 picoseconds. + Weight::from_parts(24_557_574, 6723) + // Standard Error: 1_131 + .saturating_add(Weight::from_parts(86_358, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -358,367 +353,359 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 6_791_000 picoseconds. + Weight::from_parts(7_126_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_657_000 picoseconds. 
+ Weight::from_parts(9_922_000, 3517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + // Minimum execution time: 67_928_000 picoseconds. + Weight::from_parts(69_993_000, 11037) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) + // Minimum execution time: 20_496_000 picoseconds. + Weight::from_parts(20_971_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 13_845_000 picoseconds. 
+ Weight::from_parts(16_201_000, 3550) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 15_900_000 picoseconds. + Weight::from_parts(16_716_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 11_538_000 picoseconds. + Weight::from_parts(11_898_000, 11037) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 11_683_000 picoseconds. - Weight::from_parts(12_515_830, 2626) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into())) + // Minimum execution time: 8_857_000 picoseconds. 
+ Weight::from_parts(9_331_464, 2626) + // Standard Error: 1_745 + .saturating_add(Weight::from_parts(94_096, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. - fn set_identity(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `442 + r * (5 Β±0)` - // Estimated: `11003` - // Minimum execution time: 32_949_000 picoseconds. - Weight::from_parts(31_329_634, 11003) - // Standard Error: 4_496 - .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into())) + fn set_identity(_r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6978 + r * (5 Β±0)` + // Estimated: `11037` + // Minimum execution time: 96_522_000 picoseconds. + Weight::from_parts(102_738_605, 11037) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 Β±0)` - // Minimum execution time: 9_157_000 picoseconds. - Weight::from_parts(24_917_444, 11003) - // Standard Error: 4_554 - .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 Β±0)` + // Minimum execution time: 9_112_000 picoseconds. 
+ Weight::from_parts(22_676_873, 11037) + // Standard Error: 6_494 + .saturating_add(Weight::from_parts(3_279_196, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 Β±0)` - // Estimated: `11003` - // Minimum execution time: 9_240_000 picoseconds. - Weight::from_parts(23_326_035, 11003) - // Standard Error: 3_664 - .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_902_000 picoseconds. + Weight::from_parts(21_168_031, 11037) + // Standard Error: 3_576 + .saturating_add(Weight::from_parts(1_431_542, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(r: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `469 + r * (5 Β±0) + s * (32 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 55_687_000 picoseconds. 
- Weight::from_parts(30_695_182, 11003) - // Standard Error: 9_921 - .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into())) - // Standard Error: 1_937 - .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into())) + fn clear_identity(_r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7070 + r * (5 Β±0) + s * (32 Β±0)` + // Estimated: `11037` + // Minimum execution time: 52_468_000 picoseconds. + Weight::from_parts(56_065_452, 11037) + // Standard Error: 2_274 + .saturating_add(Weight::from_parts(1_399_051, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 34_876_000 picoseconds. - Weight::from_parts(32_207_018, 11003) - // Standard Error: 5_247 - .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into())) + // Measured: `6968 + r * (57 Β±0)` + // Estimated: `11037` + // Minimum execution time: 67_951_000 picoseconds. + Weight::from_parts(69_591_058, 11037) + // Standard Error: 4_420 + .saturating_add(Weight::from_parts(209_239, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 30_689_000 picoseconds. - Weight::from_parts(31_967_170, 11003) - // Standard Error: 5_387 - .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 67_818_000 picoseconds. 
+ Weight::from_parts(70_356_319, 11037) + // Standard Error: 5_116 + .saturating_add(Weight::from_parts(4_264, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_357_000 picoseconds. - Weight::from_parts(7_932_950, 2626) - // Standard Error: 1_804 - .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into())) + // Minimum execution time: 6_096_000 picoseconds. + Weight::from_parts(6_470_752, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(94_764, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_437_000 picoseconds. - Weight::from_parts(8_051_889, 2626) - // Standard Error: 1_997 - .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into())) + // Minimum execution time: 6_278_000 picoseconds. + Weight::from_parts(6_678_997, 2626) + // Standard Error: 1_396 + .saturating_add(Weight::from_parts(87_207, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 Β±0)` // Estimated: `2626` - // Minimum execution time: 7_385_000 picoseconds. - Weight::from_parts(7_911_589, 2626) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into())) + // Minimum execution time: 6_130_000 picoseconds. 
+ Weight::from_parts(6_516_146, 2626) + // Standard Error: 1_356 + .saturating_add(Weight::from_parts(88_455, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 24_073_000 picoseconds. - Weight::from_parts(17_817_684, 11003) - // Standard Error: 8_612 - .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 Β±0)` + // Estimated: `11037` + // Minimum execution time: 87_100_000 picoseconds. + Weight::from_parts(87_601_945, 11037) + // Standard Error: 22_724 + .saturating_add(Weight::from_parts(458_296, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 Β±0) + s * (32 Β±0) + x * (66 Β±0)` - // Estimated: `11003` - // Minimum execution time: 73_981_000 picoseconds. - Weight::from_parts(51_684_057, 11003) - // Standard Error: 12_662 - .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into())) - // Standard Error: 2_472 - .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 Β±0) + s * (32 Β±0)` + // Estimated: `11037` + // Minimum execution time: 65_925_000 picoseconds. 
+ Weight::from_parts(70_786_250, 11037) + // Standard Error: 19_680 + .saturating_add(Weight::from_parts(29_782, 0).saturating_mul(r.into())) + // Standard Error: 3_839 + .saturating_add(Weight::from_parts(1_377_604, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 Β±0)` - // Estimated: `11003` - // Minimum execution time: 29_367_000 picoseconds. - Weight::from_parts(34_214_998, 11003) - // Standard Error: 1_522 - .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 25_916_000 picoseconds. + Weight::from_parts(29_781_270, 11037) + // Standard Error: 1_326 + .saturating_add(Weight::from_parts(101_515, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 Β±0)` - // Estimated: `11003` - // Minimum execution time: 12_384_000 picoseconds. - Weight::from_parts(14_417_903, 11003) - // Standard Error: 539 - .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 12_142_000 picoseconds. 
+ Weight::from_parts(14_179_741, 11037) + // Standard Error: 538 + .saturating_add(Weight::from_parts(38_847, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 Β±0)` - // Estimated: `11003` - // Minimum execution time: 33_327_000 picoseconds. - Weight::from_parts(36_208_941, 11003) - // Standard Error: 1_240 - .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_327_000 picoseconds. + Weight::from_parts(32_163_402, 11037) + // Standard Error: 1_047 + .saturating_add(Weight::from_parts(87_159, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 Β±0)` // Estimated: `6723` - // Minimum execution time: 23_764_000 picoseconds. - Weight::from_parts(26_407_731, 6723) - // Standard Error: 1_025 - .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into())) + // Minimum execution time: 22_380_000 picoseconds. 
+ Weight::from_parts(24_557_574, 6723) + // Standard Error: 1_131 + .saturating_add(Weight::from_parts(86_358, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -728,92 +715,88 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 6_791_000 picoseconds. + Weight::from_parts(7_126_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_657_000 picoseconds. + Weight::from_parts(9_922_000, 3517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(3)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Minimum execution time: 67_928_000 picoseconds. 
+ Weight::from_parts(69_993_000, 11037) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Minimum execution time: 20_496_000 picoseconds. + Weight::from_parts(20_971_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) - .saturating_add(RocksDbWeight::get().reads(1)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 13_845_000 picoseconds. + Weight::from_parts(16_201_000, 3550) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 15_900_000 picoseconds. 
+ Weight::from_parts(16_716_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 11_538_000 picoseconds. + Weight::from_parts(11_898_000, 11037) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 9dad148b10f..c0ba9143e13 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ use pallet_session::historical as pallet_session_historical; use sp_core::H256; use sp_runtime::{ - testing::{TestXt, UintAuthorityId}, + testing::UintAuthorityId, traits::{BlakeTwo256, ConvertInto, IdentityLookup}, BuildStorage, Permill, }; @@ -78,7 +78,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. -pub type Extrinsic = TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 79036760c2d..7efca926eb0 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -231,7 +231,7 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), @@ -345,7 +345,7 @@ fn should_not_send_a_report_if_already_online() { assert_eq!(pool_state.read().transactions.len(), 0); // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), diff --git a/substrate/frame/im-online/src/weights.rs b/substrate/frame/im-online/src/weights.rs index c3db02af257..11357b1e7b7 100644 --- a/substrate/frame/im-online/src/weights.rs +++ b/substrate/frame/im-online/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_im_online +//! Autogenerated weights for `pallet_im_online` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/im-online/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/im-online/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,60 +49,60 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_im_online. +/// Weight functions needed for `pallet_im_online`. pub trait WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight; } -/// Weights for pallet_im_online using the Substrate node and recommended hardware. +/// Weights for `pallet_im_online` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Session CurrentIndex (r:1 w:0) - /// Proof Skipped: Session CurrentIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ImOnline Keys (r:1 w:0) - /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) - /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) - /// Storage: ImOnline AuthoredBlocks (r:1 w:0) - /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::CurrentIndex` (r:1 w:0) + /// Proof: `Session::CurrentIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ImOnline::Keys` (r:1 w:0) + /// Proof: `ImOnline::Keys` (`max_values`: Some(1), `max_size`: Some(320002), added: 320497, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::ReceivedHeartbeats` (r:1 w:1) + /// Proof: `ImOnline::ReceivedHeartbeats` (`max_values`: None, `max_size`: Some(25), added: 2500, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::AuthoredBlocks` (r:1 w:0) + /// Proof: `ImOnline::AuthoredBlocks` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 1000]`. 
fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `295 + k * (32 Β±0)` + // Measured: `327 + k * (32 Β±0)` // Estimated: `321487 + k * (1761 Β±0)` - // Minimum execution time: 80_568_000 picoseconds. - Weight::from_parts(95_175_595, 321487) - // Standard Error: 627 - .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) + // Minimum execution time: 65_515_000 picoseconds. + Weight::from_parts(74_765_329, 321487) + // Standard Error: 500 + .saturating_add(Weight::from_parts(39_171, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Session Validators (r:1 w:0) - /// Proof Skipped: Session Validators (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Session CurrentIndex (r:1 w:0) - /// Proof Skipped: Session CurrentIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ImOnline Keys (r:1 w:0) - /// Proof: ImOnline Keys (max_values: Some(1), max_size: Some(320002), added: 320497, mode: MaxEncodedLen) - /// Storage: ImOnline ReceivedHeartbeats (r:1 w:1) - /// Proof: ImOnline ReceivedHeartbeats (max_values: None, max_size: Some(25), added: 2500, mode: MaxEncodedLen) - /// Storage: ImOnline AuthoredBlocks (r:1 w:0) - /// Proof: ImOnline AuthoredBlocks (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::CurrentIndex` (r:1 w:0) + /// Proof: `Session::CurrentIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ImOnline::Keys` (r:1 w:0) + /// Proof: `ImOnline::Keys` (`max_values`: Some(1), `max_size`: Some(320002), added: 320497, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::ReceivedHeartbeats` (r:1 w:1) + /// Proof: `ImOnline::ReceivedHeartbeats` (`max_values`: None, `max_size`: Some(25), added: 2500, mode: `MaxEncodedLen`) + /// Storage: `ImOnline::AuthoredBlocks` (r:1 w:0) + /// Proof: `ImOnline::AuthoredBlocks` (`max_values`: None, `max_size`: Some(56), added: 2531, mode: `MaxEncodedLen`) /// The range of component `k` is `[1, 1000]`. fn validate_unsigned_and_then_heartbeat(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `295 + k * (32 Β±0)` + // Measured: `327 + k * (32 Β±0)` // Estimated: `321487 + k * (1761 Β±0)` - // Minimum execution time: 80_568_000 picoseconds. - Weight::from_parts(95_175_595, 321487) - // Standard Error: 627 - .saturating_add(Weight::from_parts(39_094, 0).saturating_mul(k.into())) + // Minimum execution time: 65_515_000 picoseconds. 
+ Weight::from_parts(74_765_329, 321487) + // Standard Error: 500 + .saturating_add(Weight::from_parts(39_171, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) diff --git a/substrate/frame/indices/src/weights.rs b/substrate/frame/indices/src/weights.rs index d081cc738b1..6b571164eb6 100644 --- a/substrate/frame/indices/src/weights.rs +++ b/substrate/frame/indices/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_indices +//! Autogenerated weights for `pallet_indices` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/indices/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/indices/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_indices. +/// Weight functions needed for `pallet_indices`. pub trait WeightInfo { fn claim() -> Weight; fn transfer() -> Weight; @@ -59,128 +58,128 @@ pub trait WeightInfo { fn freeze() -> Weight; } -/// Weights for pallet_indices using the Substrate node and recommended hardware. +/// Weights for `pallet_indices` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 25_491_000 picoseconds. - Weight::from_parts(26_456_000, 3534) + // Minimum execution time: 20_825_000 picoseconds. 
+ Weight::from_parts(21_507_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 38_027_000 picoseconds. - Weight::from_parts(38_749_000, 3593) + // Minimum execution time: 31_091_000 picoseconds. + Weight::from_parts(31_923_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 26_652_000 picoseconds. - Weight::from_parts(27_273_000, 3534) + // Minimum execution time: 21_832_000 picoseconds. + Weight::from_parts(22_436_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_464_000 picoseconds. - Weight::from_parts(30_959_000, 3593) + // Minimum execution time: 23_876_000 picoseconds. + Weight::from_parts(24_954_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_015_000 picoseconds. - Weight::from_parts(29_714_000, 3534) + // Minimum execution time: 22_954_000 picoseconds. 
+ Weight::from_parts(23_792_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 25_491_000 picoseconds. - Weight::from_parts(26_456_000, 3534) + // Minimum execution time: 20_825_000 picoseconds. + Weight::from_parts(21_507_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 38_027_000 picoseconds. - Weight::from_parts(38_749_000, 3593) + // Minimum execution time: 31_091_000 picoseconds. + Weight::from_parts(31_923_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 26_652_000 picoseconds. - Weight::from_parts(27_273_000, 3534) + // Minimum execution time: 21_832_000 picoseconds. + Weight::from_parts(22_436_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 29_464_000 picoseconds. - Weight::from_parts(30_959_000, 3593) + // Minimum execution time: 23_876_000 picoseconds. 
+ Weight::from_parts(24_954_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 29_015_000 picoseconds. - Weight::from_parts(29_714_000, 3534) + // Minimum execution time: 22_954_000 picoseconds. + Weight::from_parts(23_792_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/lottery/src/weights.rs b/substrate/frame/lottery/src/weights.rs index 3b4e5623753..7e56cdf90db 100644 --- a/substrate/frame/lottery/src/weights.rs +++ b/substrate/frame/lottery/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_lottery +//! Autogenerated weights for `pallet_lottery` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/lottery/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/lottery/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_lottery. +/// Weight functions needed for `pallet_lottery`. pub trait WeightInfo { fn buy_ticket() -> Weight; fn set_calls(n: u32, ) -> Weight; @@ -60,214 +59,222 @@ pub trait WeightInfo { fn on_initialize_repeat() -> Weight; } -/// Weights for pallet_lottery using the Substrate node and recommended hardware. +/// Weights for `pallet_lottery` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Lottery Lottery (r:1 w:0) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery CallIndices (r:1 w:0) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Participants (r:1 w:1) - /// Proof: Lottery Participants (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:0) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:0 w:1) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:0) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::CallIndices` (r:1 w:0) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Participants` (r:1 w:1) + /// Proof: `Lottery::Participants` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:0) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:0 w:1) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `452` - // Estimated: `3593` - // Minimum execution time: 60_298_000 picoseconds. - Weight::from_parts(62_058_000, 3593) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `492` + // Estimated: `3997` + // Minimum execution time: 57_891_000 picoseconds. + Weight::from_parts(59_508_000, 3997) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Lottery CallIndices (r:0 w:1) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) + /// Storage: `Lottery::CallIndices` (r:0 w:1) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. 
fn set_calls(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_291_000 picoseconds. - Weight::from_parts(8_178_186, 0) - // Standard Error: 3_048 - .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) + // Minimum execution time: 5_026_000 picoseconds. + Weight::from_parts(5_854_666, 0) + // Standard Error: 3_233 + .saturating_add(Weight::from_parts(334_818, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 36_741_000 picoseconds. - Weight::from_parts(38_288_000, 3593) + // Minimum execution time: 26_216_000 picoseconds. + Weight::from_parts(27_216_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) fn stop_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `219` + // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_270_000 picoseconds. - Weight::from_parts(7_578_000, 1514) + // Minimum execution time: 6_208_000 picoseconds. 
+ Weight::from_parts(6_427_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 76_611_000 picoseconds. - Weight::from_parts(78_107_000, 6196) + // Minimum execution time: 58_660_000 picoseconds. 
+ Weight::from_parts(59_358_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 78_731_000 picoseconds. - Weight::from_parts(80_248_000, 6196) + // Minimum execution time: 59_376_000 picoseconds. + Weight::from_parts(60_598_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Lottery Lottery (r:1 w:0) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery CallIndices (r:1 w:0) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Participants (r:1 w:1) - /// Proof: Lottery Participants (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:0) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:0 w:1) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:0) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::CallIndices` (r:1 w:0) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Participants` (r:1 w:1) + /// Proof: `Lottery::Participants` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:0) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:0 w:1) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `452` - // Estimated: `3593` - // Minimum execution time: 60_298_000 picoseconds. - Weight::from_parts(62_058_000, 3593) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `492` + // Estimated: `3997` + // Minimum execution time: 57_891_000 picoseconds. + Weight::from_parts(59_508_000, 3997) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Lottery CallIndices (r:0 w:1) - /// Proof: Lottery CallIndices (max_values: Some(1), max_size: Some(21), added: 516, mode: MaxEncodedLen) + /// Storage: `Lottery::CallIndices` (r:0 w:1) + /// Proof: `Lottery::CallIndices` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_291_000 picoseconds. 
- Weight::from_parts(8_178_186, 0) - // Standard Error: 3_048 - .saturating_add(Weight::from_parts(330_871, 0).saturating_mul(n.into())) + // Minimum execution time: 5_026_000 picoseconds. + Weight::from_parts(5_854_666, 0) + // Standard Error: 3_233 + .saturating_add(Weight::from_parts(334_818, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `3593` - // Minimum execution time: 36_741_000 picoseconds. - Weight::from_parts(38_288_000, 3593) + // Minimum execution time: 26_216_000 picoseconds. + Weight::from_parts(27_216_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) fn stop_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `219` + // Measured: `252` // Estimated: `1514` - // Minimum execution time: 7_270_000 picoseconds. - Weight::from_parts(7_578_000, 1514) + // Minimum execution time: 6_208_000 picoseconds. 
+ Weight::from_parts(6_427_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 76_611_000 picoseconds. - Weight::from_parts(78_107_000, 6196) + // Minimum execution time: 58_660_000 picoseconds. 
+ Weight::from_parts(59_358_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) - /// Proof: RandomnessCollectiveFlip RandomMaterial (max_values: Some(1), max_size: Some(2594), added: 3089, mode: MaxEncodedLen) - /// Storage: Lottery Lottery (r:1 w:1) - /// Proof: Lottery Lottery (max_values: Some(1), max_size: Some(29), added: 524, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Lottery TicketsCount (r:1 w:1) - /// Proof: Lottery TicketsCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Lottery Tickets (r:1 w:0) - /// Proof: Lottery Tickets (max_values: None, max_size: Some(44), added: 2519, mode: MaxEncodedLen) - /// Storage: Lottery LotteryIndex (r:1 w:1) - /// Proof: Lottery LotteryIndex (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) + /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Lottery` (r:1 w:1) + /// Proof: `Lottery::Lottery` (`max_values`: Some(1), `max_size`: Some(29), added: 524, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Lottery::TicketsCount` (r:1 w:1) + /// Proof: `Lottery::TicketsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Lottery::Tickets` (r:1 w:0) + /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `Lottery::LotteryIndex` (r:1 w:1) + /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `558` + // Measured: `591` // Estimated: `6196` - // Minimum execution time: 78_731_000 picoseconds. - Weight::from_parts(80_248_000, 6196) + // Minimum execution time: 59_376_000 picoseconds. + Weight::from_parts(60_598_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 2d18848b89a..f21867d6870 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_membership +//! Autogenerated weights for `pallet_membership` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/membership/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/membership/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_membership. +/// Weight functions needed for `pallet_membership`. pub trait WeightInfo { fn add_member(m: u32, ) -> Weight; fn remove_member(m: u32, ) -> Weight; @@ -61,299 +60,299 @@ pub trait WeightInfo { fn clear_prime() -> Weight; } -/// Weights for pallet_membership using the Substrate node and recommended hardware. +/// Weights for `pallet_membership` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `208 + m * (64 Β±0)` + // Measured: `207 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(18_344_571, 4687) - // Standard Error: 847 - .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) + // Minimum execution time: 12_126_000 picoseconds. 
+ Weight::from_parts(13_085_583, 4687) + // Standard Error: 659 + .saturating_add(Weight::from_parts(36_103, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(21_271_384, 4687) - // Standard Error: 786 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) + // Minimum execution time: 14_571_000 picoseconds. 
+ Weight::from_parts(15_532_232, 4687) + // Standard Error: 531 + .saturating_add(Weight::from_parts(35_757, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_308_000 picoseconds. - Weight::from_parts(21_469_843, 4687) - // Standard Error: 782 - .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) + // Minimum execution time: 14_833_000 picoseconds. 
+ Weight::from_parts(15_657_084, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(44_467, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(21_223_702, 4687) - // Standard Error: 1_068 - .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) + // Minimum execution time: 14_629_000 picoseconds. 
+ Weight::from_parts(15_578_203, 4687) + // Standard Error: 910 + .saturating_add(Weight::from_parts(145_101, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_965_000 picoseconds. - Weight::from_parts(22_551_007, 4687) - // Standard Error: 860 - .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) + // Minimum execution time: 15_218_000 picoseconds. 
+ Weight::from_parts(16_388_690, 4687) + // Standard Error: 626 + .saturating_add(Weight::from_parts(46_204, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:0) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:0) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32 + m * (32 Β±0)` + // Measured: `31 + m * (32 Β±0)` // Estimated: `4687 + m * (32 Β±0)` - // Minimum execution time: 7_481_000 picoseconds. - Weight::from_parts(7_959_053, 4687) - // Standard Error: 364 - .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) + // Minimum execution time: 5_954_000 picoseconds. + Weight::from_parts(6_544_638, 4687) + // Standard Error: 346 + .saturating_add(Weight::from_parts(17_638, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_373_000 picoseconds. - Weight::from_parts(3_750_452, 0) + // Minimum execution time: 2_569_000 picoseconds. + Weight::from_parts(2_776_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `208 + m * (64 Β±0)` + // Measured: `207 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(18_344_571, 4687) - // Standard Error: 847 - .saturating_add(Weight::from_parts(50_842, 0).saturating_mul(m.into())) + // Minimum execution time: 12_126_000 picoseconds. 
+ Weight::from_parts(13_085_583, 4687) + // Standard Error: 659 + .saturating_add(Weight::from_parts(36_103, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(21_271_384, 4687) - // Standard Error: 786 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) + // Minimum execution time: 14_571_000 picoseconds. 
+ Weight::from_parts(15_532_232, 4687) + // Standard Error: 531 + .saturating_add(Weight::from_parts(35_757, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_308_000 picoseconds. - Weight::from_parts(21_469_843, 4687) - // Standard Error: 782 - .saturating_add(Weight::from_parts(56_893, 0).saturating_mul(m.into())) + // Minimum execution time: 14_833_000 picoseconds. 
+ Weight::from_parts(15_657_084, 4687) + // Standard Error: 650 + .saturating_add(Weight::from_parts(44_467, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:0) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(21_223_702, 4687) - // Standard Error: 1_068 - .saturating_add(Weight::from_parts(165_438, 0).saturating_mul(m.into())) + // Minimum execution time: 14_629_000 picoseconds. 
+ Weight::from_parts(15_578_203, 4687) + // Standard Error: 910 + .saturating_add(Weight::from_parts(145_101, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:1) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Proposals` (r:1 w:0) + /// Proof: `TechnicalCommittee::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalMembership::Prime` (r:1 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Members` (r:0 w:1) + /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `312 + m * (64 Β±0)` + // Measured: `311 + m * (64 Β±0)` // Estimated: `4687 + m * (64 Β±0)` - // Minimum execution time: 20_965_000 picoseconds. - Weight::from_parts(22_551_007, 4687) - // Standard Error: 860 - .saturating_add(Weight::from_parts(52_397, 0).saturating_mul(m.into())) + // Minimum execution time: 15_218_000 picoseconds. 
+ Weight::from_parts(16_388_690, 4687) + // Standard Error: 626 + .saturating_add(Weight::from_parts(46_204, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Members (r:1 w:0) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Members` (r:1 w:0) + /// Proof: `TechnicalMembership::Members` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32 + m * (32 Β±0)` + // Measured: `31 + m * (32 Β±0)` // Estimated: `4687 + m * (32 Β±0)` - // Minimum execution time: 7_481_000 picoseconds. - Weight::from_parts(7_959_053, 4687) - // Standard Error: 364 - .saturating_add(Weight::from_parts(18_653, 0).saturating_mul(m.into())) + // Minimum execution time: 5_954_000 picoseconds. + Weight::from_parts(6_544_638, 4687) + // Standard Error: 346 + .saturating_add(Weight::from_parts(17_638, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TechnicalMembership::Prime` (r:0 w:1) + /// Proof: `TechnicalMembership::Prime` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TechnicalCommittee::Prime` (r:0 w:1) + /// Proof: `TechnicalCommittee::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_373_000 picoseconds. - Weight::from_parts(3_750_452, 0) + // Minimum execution time: 2_569_000 picoseconds. + Weight::from_parts(2_776_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/message-queue/src/weights.rs b/substrate/frame/message-queue/src/weights.rs index e86f23e274f..001e2834e1c 100644 --- a/substrate/frame/message-queue/src/weights.rs +++ b/substrate/frame/message-queue/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_message_queue +//! Autogenerated weights for `pallet_message_queue` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/message-queue/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/message-queue/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_message_queue. +/// Weight functions needed for `pallet_message_queue`. pub trait WeightInfo { fn ready_ring_knit() -> Weight; fn ready_ring_unknit() -> Weight; @@ -64,246 +63,256 @@ pub trait WeightInfo { fn execute_overweight_page_updated() -> Weight; } -/// Weights for pallet_message_queue using the Substrate node and recommended hardware. +/// Weights for `pallet_message_queue` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 12_025_000 picoseconds. - Weight::from_parts(12_597_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. 
+ Weight::from_parts(11_956_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_563_000 picoseconds. - Weight::from_parts(11_785_000, 6038) + // Minimum execution time: 10_263_000 picoseconds. + Weight::from_parts(10_638_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_467_000 picoseconds. - Weight::from_parts(4_655_000, 3514) + // Minimum execution time: 4_335_000 picoseconds. + Weight::from_parts(4_552_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_103_000 picoseconds. - Weight::from_parts(6_254_000, 69049) + // Minimum execution time: 6_016_000 picoseconds. + Weight::from_parts(6_224_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_565_000, 69049) + // Minimum execution time: 6_183_000 picoseconds. 
+ Weight::from_parts(6_348_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 66_062_000 picoseconds. - Weight::from_parts(66_371_000, 0) + // Minimum execution time: 112_864_000 picoseconds. + Weight::from_parts(114_269_000, 0) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_788_000 picoseconds. - Weight::from_parts(7_176_000, 3514) + // Minimum execution time: 6_665_000 picoseconds. + Weight::from_parts(7_108_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 52_865_000 picoseconds. - Weight::from_parts(54_398_000, 69049) + // Minimum execution time: 51_420_000 picoseconds. 
+ Weight::from_parts(52_252_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 69_168_000 picoseconds. - Weight::from_parts(70_560_000, 69049) + // Minimum execution time: 71_195_000 picoseconds. + Weight::from_parts(72_981_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 80_947_000 picoseconds. - Weight::from_parts(82_715_000, 69049) + // Minimum execution time: 83_238_000 picoseconds. + Weight::from_parts(84_422_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 12_025_000 picoseconds. - Weight::from_parts(12_597_000, 6038) + // Minimum execution time: 11_563_000 picoseconds. 
+ Weight::from_parts(11_956_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `267` + // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_563_000 picoseconds. - Weight::from_parts(11_785_000, 6038) + // Minimum execution time: 10_263_000 picoseconds. + Weight::from_parts(10_638_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_467_000 picoseconds. - Weight::from_parts(4_655_000, 3514) + // Minimum execution time: 4_335_000 picoseconds. + Weight::from_parts(4_552_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_103_000 picoseconds. - Weight::from_parts(6_254_000, 69049) + // Minimum execution time: 6_016_000 picoseconds. + Weight::from_parts(6_224_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_320_000 picoseconds. - Weight::from_parts(6_565_000, 69049) + // Minimum execution time: 6_183_000 picoseconds. 
+ Weight::from_parts(6_348_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 66_062_000 picoseconds. - Weight::from_parts(66_371_000, 0) + // Minimum execution time: 112_864_000 picoseconds. + Weight::from_parts(114_269_000, 0) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_788_000 picoseconds. - Weight::from_parts(7_176_000, 3514) + // Minimum execution time: 6_665_000 picoseconds. + Weight::from_parts(7_108_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 52_865_000 picoseconds. - Weight::from_parts(54_398_000, 69049) + // Minimum execution time: 51_420_000 picoseconds. 
+ Weight::from_parts(52_252_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 69_168_000 picoseconds. - Weight::from_parts(70_560_000, 69049) + // Minimum execution time: 71_195_000 picoseconds. + Weight::from_parts(72_981_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65584), added: 68059, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 80_947_000 picoseconds. - Weight::from_parts(82_715_000, 69049) + // Minimum execution time: 83_238_000 picoseconds. + Weight::from_parts(84_422_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs index c9b63258c44..9eca48ac3fd 100644 --- a/substrate/frame/migrations/src/weights.rs +++ b/substrate/frame/migrations/src/weights.rs @@ -17,26 +17,29 @@ //! Autogenerated weights for `pallet_migrations` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `loud1`, CPU: `AMD EPYC 7282 16-Core Processor` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/release/substrate-node +// ./target/production/substrate-node // benchmark // pallet -// --chain -// dev -// --pallet -// pallet-migrations -// --extrinsic -// -// --output -// weight.rs -// --template -// ../../polkadot-sdk/substrate/.maintain/frame-weight-template.hbs +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_migrations +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/migrations/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -71,10 +74,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `243` + // Measured: `276` // Estimated: `67035` - // Minimum execution time: 13_980_000 picoseconds. - Weight::from_parts(14_290_000, 67035) + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(8_428_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,10 +85,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn progress_mbms_none() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `67035` - // Minimum execution time: 3_770_000 picoseconds. - Weight::from_parts(4_001_000, 67035) + // Minimum execution time: 2_229_000 picoseconds. + Weight::from_parts(2_329_000, 67035) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -96,8 +99,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `134` // Estimated: `3599` - // Minimum execution time: 10_900_000 picoseconds. - Weight::from_parts(11_251_000, 3599) + // Minimum execution time: 6_051_000 picoseconds. + Weight::from_parts(6_483_000, 3599) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 17_891_000 picoseconds. - Weight::from_parts(18_501_000, 3762) + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_066_000 picoseconds. + Weight::from_parts(10_713_000, 3795) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -119,10 +122,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 18_271_000 picoseconds. - Weight::from_parts(18_740_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 10_026_000 picoseconds. 
+ Weight::from_parts(10_379_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -131,10 +134,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 21_241_000 picoseconds. - Weight::from_parts(21_911_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_680_000 picoseconds. + Weight::from_parts(12_184_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -146,10 +149,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 22_740_000 picoseconds. - Weight::from_parts(23_231_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_334_000 picoseconds. + Weight::from_parts(12_899_000, 3741) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -157,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 440_000 picoseconds. - Weight::from_parts(500_000, 0) + // Minimum execution time: 187_000 picoseconds. + Weight::from_parts(209_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -166,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_751_000 picoseconds. - Weight::from_parts(5_950_000, 0) + // Minimum execution time: 2_688_000 picoseconds. + Weight::from_parts(2_874_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -176,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_350_000 picoseconds. - Weight::from_parts(6_560_000, 0) + // Minimum execution time: 3_108_000 picoseconds. + Weight::from_parts(3_263_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -186,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `251` // Estimated: `67035` - // Minimum execution time: 11_121_000 picoseconds. - Weight::from_parts(11_530_000, 67035) + // Minimum execution time: 5_993_000 picoseconds. + Weight::from_parts(6_359_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -197,12 +200,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 256]`. 
fn clear_historic(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1089 + n * (271 Β±0)` + // Measured: `1122 + n * (271 Β±0)` // Estimated: `3834 + n * (2740 Β±0)` - // Minimum execution time: 21_891_000 picoseconds. - Weight::from_parts(18_572_306, 3834) - // Standard Error: 3_236 - .saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into())) + // Minimum execution time: 16_003_000 picoseconds. + Weight::from_parts(14_453_014, 3834) + // Standard Error: 3_305 + .saturating_add(Weight::from_parts(1_325_026, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -218,10 +221,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `243` + // Measured: `276` // Estimated: `67035` - // Minimum execution time: 13_980_000 picoseconds. - Weight::from_parts(14_290_000, 67035) + // Minimum execution time: 7_932_000 picoseconds. + Weight::from_parts(8_428_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -229,10 +232,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn progress_mbms_none() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `67035` - // Minimum execution time: 3_770_000 picoseconds. - Weight::from_parts(4_001_000, 67035) + // Minimum execution time: 2_229_000 picoseconds. + Weight::from_parts(2_329_000, 67035) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -243,8 +246,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `134` // Estimated: `3599` - // Minimum execution time: 10_900_000 picoseconds. - Weight::from_parts(11_251_000, 3599) + // Minimum execution time: 6_051_000 picoseconds. + Weight::from_parts(6_483_000, 3599) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -254,10 +257,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 17_891_000 picoseconds. - Weight::from_parts(18_501_000, 3762) + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_066_000 picoseconds. + Weight::from_parts(10_713_000, 3795) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -266,10 +269,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 18_271_000 picoseconds. - Weight::from_parts(18_740_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 10_026_000 picoseconds. 
+ Weight::from_parts(10_379_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -278,10 +281,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 21_241_000 picoseconds. - Weight::from_parts(21_911_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_680_000 picoseconds. + Weight::from_parts(12_184_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -293,10 +296,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3731` - // Minimum execution time: 22_740_000 picoseconds. - Weight::from_parts(23_231_000, 3731) + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_334_000 picoseconds. + Weight::from_parts(12_899_000, 3741) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -304,8 +307,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 440_000 picoseconds. - Weight::from_parts(500_000, 0) + // Minimum execution time: 187_000 picoseconds. + Weight::from_parts(209_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -313,8 +316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_751_000 picoseconds. - Weight::from_parts(5_950_000, 0) + // Minimum execution time: 2_688_000 picoseconds. + Weight::from_parts(2_874_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -323,8 +326,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_350_000 picoseconds. - Weight::from_parts(6_560_000, 0) + // Minimum execution time: 3_108_000 picoseconds. + Weight::from_parts(3_263_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -333,10 +336,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `251` // Estimated: `67035` - // Minimum execution time: 11_121_000 picoseconds. - Weight::from_parts(11_530_000, 67035) + // Minimum execution time: 5_993_000 picoseconds. + Weight::from_parts(6_359_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -344,12 +347,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 256]`. fn clear_historic(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1089 + n * (271 Β±0)` + // Measured: `1122 + n * (271 Β±0)` // Estimated: `3834 + n * (2740 Β±0)` - // Minimum execution time: 21_891_000 picoseconds. 
- Weight::from_parts(18_572_306, 3834) - // Standard Error: 3_236 - .saturating_add(Weight::from_parts(1_648_429, 0).saturating_mul(n.into())) + // Minimum execution time: 16_003_000 picoseconds. + Weight::from_parts(14_453_014, 3834) + // Standard Error: 3_305 + .saturating_add(Weight::from_parts(1_325_026, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index 7b87d258d38..4cbe7bb03b7 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_multisig +//! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/multisig/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/multisig/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_multisig. +/// Weight functions needed for `pallet_multisig`. pub trait WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight; fn as_multi_create(s: u32, z: u32, ) -> Weight; @@ -61,220 +60,238 @@ pub trait WeightInfo { fn cancel_as_multi(s: u32, ) -> Weight; } -/// Weights for pallet_multisig using the Substrate node and recommended hardware. +/// Weights for `pallet_multisig` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `z` is `[0, 10000]`. 
fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_452_000 picoseconds. - Weight::from_parts(14_425_869, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(20_278_307, 3997) // Standard Error: 4 - .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(488, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 Β±0)` // Estimated: `6811` - // Minimum execution time: 46_012_000 picoseconds. - Weight::from_parts(34_797_344, 6811) - // Standard Error: 833 - .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) + // Minimum execution time: 39_478_000 picoseconds. + Weight::from_parts(29_195_487, 6811) + // Standard Error: 739 + .saturating_add(Weight::from_parts(118_766, 0).saturating_mul(s.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_511, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 29_834_000 picoseconds. - Weight::from_parts(20_189_154, 6811) - // Standard Error: 637 - .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) + // Minimum execution time: 26_401_000 picoseconds. 
+ Weight::from_parts(17_277_296, 6811) + // Standard Error: 492 + .saturating_add(Weight::from_parts(101_763, 0).saturating_mul(s.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_486, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `426 + s * (33 Β±0)` + // Measured: `571 + s * (33 Β±0)` // Estimated: `6811` - // Minimum execution time: 51_464_000 picoseconds. - Weight::from_parts(39_246_644, 6811) - // Standard Error: 1_251 - .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Minimum execution time: 52_430_000 picoseconds. + Weight::from_parts(40_585_478, 6811) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(161_405, 0).saturating_mul(s.into())) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(Weight::from_parts(1_547, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 Β±0)` // Estimated: `6811` - // Minimum execution time: 33_275_000 picoseconds. - Weight::from_parts(34_073_221, 6811) - // Standard Error: 1_163 - .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) + // Minimum execution time: 27_797_000 picoseconds. 
+ Weight::from_parts(29_064_584, 6811) + // Standard Error: 817 + .saturating_add(Weight::from_parts(108_179, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 18_411_000 picoseconds. - Weight::from_parts(19_431_787, 6811) - // Standard Error: 694 - .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) + // Minimum execution time: 15_236_000 picoseconds. + Weight::from_parts(16_360_247, 6811) + // Standard Error: 584 + .saturating_add(Weight::from_parts(94_917, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `492 + s * (1 Β±0)` // Estimated: `6811` - // Minimum execution time: 33_985_000 picoseconds. - Weight::from_parts(35_547_970, 6811) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) + // Minimum execution time: 28_730_000 picoseconds. + Weight::from_parts(30_056_661, 6811) + // Standard Error: 792 + .saturating_add(Weight::from_parts(108_212, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_452_000 picoseconds. - Weight::from_parts(14_425_869, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 18_893_000 picoseconds. 
+ Weight::from_parts(20_278_307, 3997) // Standard Error: 4 - .saturating_add(Weight::from_parts(493, 0).saturating_mul(z.into())) + .saturating_add(Weight::from_parts(488, 0).saturating_mul(z.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 Β±0)` // Estimated: `6811` - // Minimum execution time: 46_012_000 picoseconds. - Weight::from_parts(34_797_344, 6811) - // Standard Error: 833 - .saturating_add(Weight::from_parts(127_671, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_498, 0).saturating_mul(z.into())) + // Minimum execution time: 39_478_000 picoseconds. + Weight::from_parts(29_195_487, 6811) + // Standard Error: 739 + .saturating_add(Weight::from_parts(118_766, 0).saturating_mul(s.into())) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_511, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 29_834_000 picoseconds. - Weight::from_parts(20_189_154, 6811) - // Standard Error: 637 - .saturating_add(Weight::from_parts(110_080, 0).saturating_mul(s.into())) - // Standard Error: 6 - .saturating_add(Weight::from_parts(1_483, 0).saturating_mul(z.into())) + // Minimum execution time: 26_401_000 picoseconds. 
+ Weight::from_parts(17_277_296, 6811) + // Standard Error: 492 + .saturating_add(Weight::from_parts(101_763, 0).saturating_mul(s.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_486, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `426 + s * (33 Β±0)` + // Measured: `571 + s * (33 Β±0)` // Estimated: `6811` - // Minimum execution time: 51_464_000 picoseconds. - Weight::from_parts(39_246_644, 6811) - // Standard Error: 1_251 - .saturating_add(Weight::from_parts(143_313, 0).saturating_mul(s.into())) + // Minimum execution time: 52_430_000 picoseconds. + Weight::from_parts(40_585_478, 6811) + // Standard Error: 1_240 + .saturating_add(Weight::from_parts(161_405, 0).saturating_mul(s.into())) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_523, 0).saturating_mul(z.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(Weight::from_parts(1_547, 0).saturating_mul(z.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `301 + s * (2 Β±0)` // Estimated: `6811` - // Minimum execution time: 33_275_000 picoseconds. - Weight::from_parts(34_073_221, 6811) - // Standard Error: 1_163 - .saturating_add(Weight::from_parts(124_815, 0).saturating_mul(s.into())) + // Minimum execution time: 27_797_000 picoseconds. 
+ Weight::from_parts(29_064_584, 6811) + // Standard Error: 817 + .saturating_add(Weight::from_parts(108_179, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `320` // Estimated: `6811` - // Minimum execution time: 18_411_000 picoseconds. - Weight::from_parts(19_431_787, 6811) - // Standard Error: 694 - .saturating_add(Weight::from_parts(107_220, 0).saturating_mul(s.into())) + // Minimum execution time: 15_236_000 picoseconds. + Weight::from_parts(16_360_247, 6811) + // Standard Error: 584 + .saturating_add(Weight::from_parts(94_917, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `492 + s * (1 Β±0)` // Estimated: `6811` - // Minimum execution time: 33_985_000 picoseconds. - Weight::from_parts(35_547_970, 6811) - // Standard Error: 1_135 - .saturating_add(Weight::from_parts(116_537, 0).saturating_mul(s.into())) + // Minimum execution time: 28_730_000 picoseconds. + Weight::from_parts(30_056_661, 6811) + // Standard Error: 792 + .saturating_add(Weight::from_parts(108_212, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/nft-fractionalization/src/weights.rs b/substrate/frame/nft-fractionalization/src/weights.rs index ebb4aa0fbcf..07872ebaea9 100644 --- a/substrate/frame/nft-fractionalization/src/weights.rs +++ b/substrate/frame/nft-fractionalization/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nft_fractionalization +//! Autogenerated weights for `pallet_nft_fractionalization` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nft-fractionalization/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nft-fractionalization/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,136 +49,136 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nft_fractionalization. +/// Weight functions needed for `pallet_nft_fractionalization`. pub trait WeightInfo { fn fractionalize() -> Weight; fn unify() -> Weight; } -/// Weights for pallet_nft_fractionalization using the Substrate node and recommended hardware. +/// Weights for `pallet_nft_fractionalization` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: NftFractionalization NftToAsset (r:0 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// 
Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: // Measured: `609` // Estimated: `4326` - // Minimum execution time: 187_416_000 picoseconds. - Weight::from_parts(191_131_000, 4326) + // Minimum execution time: 164_764_000 picoseconds. + Weight::from_parts(168_243_000, 4326) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } - /// Storage: NftFractionalization NftToAsset (r:1 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, 
`max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn unify() -> Weight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 134_159_000 picoseconds. - Weight::from_parts(136_621_000, 4326) + // Minimum execution time: 120_036_000 picoseconds. + Weight::from_parts(123_550_000, 4326) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Metadata (r:1 w:1) - /// Proof: Assets Metadata (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: NftFractionalization NftToAsset (r:0 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: 
`Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: // Measured: `609` // Estimated: `4326` - // Minimum execution time: 187_416_000 picoseconds. - Weight::from_parts(191_131_000, 4326) + // Minimum execution time: 164_764_000 picoseconds. + Weight::from_parts(168_243_000, 4326) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } - /// Storage: NftFractionalization NftToAsset (r:1 w:1) - /// Proof: NftFractionalization NftToAsset (max_values: None, max_size: Some(92), added: 2567, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 
2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn unify() -> Weight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 134_159_000 picoseconds. - Weight::from_parts(136_621_000, 4326) + // Minimum execution time: 120_036_000 picoseconds. + Weight::from_parts(123_550_000, 4326) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } diff --git a/substrate/frame/nfts/src/weights.rs b/substrate/frame/nfts/src/weights.rs index 6b8c577bb12..4fcc1e601f4 100644 --- a/substrate/frame/nfts/src/weights.rs +++ b/substrate/frame/nfts/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nfts +//! Autogenerated weights for `pallet_nfts` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -37,9 +37,9 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nfts/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nfts/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nfts. +/// Weight functions needed for `pallet_nfts`. 
pub trait WeightInfo { fn create() -> Weight; fn force_create() -> Weight; @@ -92,564 +92,568 @@ pub trait WeightInfo { fn set_attributes_pre_signed(n: u32, ) -> Weight; } -/// Weights for pallet_nfts using the Substrate node and recommended hardware. +/// Weights for `pallet_nfts` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_489_000 picoseconds. - Weight::from_parts(41_320_000, 3549) + // Minimum execution time: 34_035_000 picoseconds. 
+ Weight::from_parts(35_228_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 23_257_000 picoseconds. - Weight::from_parts(23_770_000, 3549) + // Minimum execution time: 19_430_000 picoseconds. 
+ Weight::from_parts(20_054_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1000 w:1000) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:0 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32220 + a * (332 Β±0)` - // Estimated: `2523990 + a * (2921 Β±0)` - // Minimum execution time: 1_310_198_000 picoseconds. - Weight::from_parts(1_479_261_043, 2523990) - // Standard Error: 4_415 - .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) + // Measured: `32204 + a * (366 Β±0)` + // Estimated: `2523990 + a * (2954 Β±0)` + // Minimum execution time: 1_249_733_000 picoseconds. 
+ Weight::from_parts(1_293_703_849, 2523990) + // Standard Error: 4_764 + .saturating_add(Weight::from_parts(6_433_523, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(a.into())) - } - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 51_910_000 picoseconds. - Weight::from_parts(53_441_000, 4326) + // Minimum execution time: 48_645_000 picoseconds. 
+ Weight::from_parts(50_287_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn force_mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_168_000 picoseconds. - Weight::from_parts(51_380_000, 4326) + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_680_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 50_738_000 picoseconds. - Weight::from_parts(51_850_000, 4326) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Minimum execution time: 51_771_000 picoseconds. 
+		Weight::from_parts(53_492_000, 4326)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
 			.saturating_add(T::DbWeight::get().writes(7_u64))
 	}
-	/// Storage: Nfts Collection (r:1 w:0)
-	/// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen)
-	/// Storage: Nfts Attribute (r:1 w:0)
-	/// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen)
-	/// Storage: Nfts CollectionConfigOf (r:1 w:0)
-	/// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: Nfts ItemConfigOf (r:1 w:0)
-	/// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
-	/// Storage: Nfts Item (r:1 w:1)
-	/// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen)
-	/// Storage: Nfts Account (r:0 w:2)
-	/// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen)
-	/// Storage: Nfts ItemPriceOf (r:0 w:1)
-	/// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen)
-	/// Storage: Nfts PendingSwapOf (r:0 w:1)
-	/// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen)
+	/// Storage: `Nfts::Collection` (r:1 w:0)
+	/// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::Attribute` (r:1 w:0)
+	/// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::CollectionConfigOf` (r:1 w:0)
+	/// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::ItemConfigOf` (r:1 w:0)
+	/// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::Item` (r:1 w:1)
+	/// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::Account` (r:0 w:2)
+	/// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::ItemPriceOf` (r:0 w:1)
+	/// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::PendingSwapOf` (r:0 w:1)
+	/// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`)
 	fn transfer() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `593`
 		// Estimated: `4326`
-		// Minimum execution time: 41_055_000 picoseconds.
-		Weight::from_parts(42_336_000, 4326)
+		// Minimum execution time: 39_166_000 picoseconds.
+		Weight::from_parts(40_128_000, 4326)
 			.saturating_add(T::DbWeight::get().reads(5_u64))
 			.saturating_add(T::DbWeight::get().writes(5_u64))
 	}
-	/// Storage: Nfts Collection (r:1 w:0)
-	/// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen)
-	/// Storage: Nfts CollectionConfigOf (r:1 w:0)
-	/// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen)
-	/// Storage: Nfts Item (r:5000 w:5000)
-	/// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen)
+	/// Storage: `Nfts::Collection` (r:1 w:0)
+	/// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::CollectionConfigOf` (r:1 w:0)
+	/// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::Item` (r:5000 w:5000)
+	/// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`)
 	/// The range of component `i` is `[0, 5000]`.
 	fn redeposit(i: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `763 + i * (108 ±0)`
 		// Estimated: `3549 + i * (3336 ±0)`
-		// Minimum execution time: 15_688_000 picoseconds.
-		Weight::from_parts(15_921_000, 3549)
-			// Standard Error: 14_827
-			.saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into()))
+		// Minimum execution time: 13_804_000 picoseconds.
+		Weight::from_parts(14_159_000, 3549)
+			// Standard Error: 13_812
+			.saturating_add(Weight::from_parts(14_661_284, 0).saturating_mul(i.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into())))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into())))
 			.saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into()))
 	}
-	/// Storage: Nfts CollectionRoleOf (r:1 w:0)
-	/// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen)
-	/// Storage: Nfts ItemConfigOf (r:1 w:1)
-	/// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+	/// Storage: `Nfts::CollectionRoleOf` (r:1 w:0)
+	/// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`)
+	/// Storage: `Nfts::ItemConfigOf` (r:1 w:1)
+	/// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
 	fn lock_item_transfer() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `435`
 		// Estimated: `3534`
-		// Minimum execution time: 19_981_000 picoseconds.
-		Weight::from_parts(20_676_000, 3534)
+		// Minimum execution time: 17_485_000 picoseconds.
+ Weight::from_parts(18_412_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_911_000 picoseconds. - Weight::from_parts(20_612_000, 3534) + // Minimum execution time: 17_335_000 picoseconds. + Weight::from_parts(18_543_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn lock_collection() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 16_441_000 picoseconds. - Weight::from_parts(16_890_000, 3549) + // Minimum execution time: 14_608_000 picoseconds. + Weight::from_parts(15_696_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_610_000 picoseconds. 
- Weight::from_parts(23_422_000, 3549) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `562` + // Estimated: `3593` + // Minimum execution time: 25_686_000 picoseconds. + Weight::from_parts(26_433_000, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:2 w:4) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 39_739_000 picoseconds. - Weight::from_parts(41_306_000, 6078) + // Minimum execution time: 37_192_000 picoseconds. + Weight::from_parts(38_561_000, 6078) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 17_685_000 picoseconds. - Weight::from_parts(18_258_000, 3549) + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_826_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn force_collection_config() -> Weight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 13_734_000 picoseconds. - Weight::from_parts(14_337_000, 3549) + // Minimum execution time: 11_683_000 picoseconds. 
+ Weight::from_parts(12_255_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_269_000 picoseconds. - Weight::from_parts(19_859_000, 3534) + // Minimum execution time: 16_899_000 picoseconds. + Weight::from_parts(17_404_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3911` - // Minimum execution time: 51_540_000 picoseconds. - Weight::from_parts(52_663_000, 3911) + // Estimated: `3944` + // Minimum execution time: 46_768_000 picoseconds. 
+ Weight::from_parts(47_834_000, 3944) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `344` - // Estimated: `3911` - // Minimum execution time: 26_529_000 picoseconds. - Weight::from_parts(27_305_000, 3911) + // Estimated: `3944` + // Minimum execution time: 23_356_000 picoseconds. + Weight::from_parts(24_528_000, 3944) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `950` - // Estimated: `3911` - // Minimum execution time: 46_951_000 picoseconds. - Weight::from_parts(48_481_000, 3911) + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 43_061_000 picoseconds. 
+ Weight::from_parts(44_024_000, 3944) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 17_222_000 picoseconds. - Weight::from_parts(17_819_000, 4326) + // Minimum execution time: 14_929_000 picoseconds. + Weight::from_parts(15_344_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + n * (364 Β±0)` - // Estimated: `4326 + n * (2921 Β±0)` - // Minimum execution time: 26_185_000 picoseconds. - Weight::from_parts(27_038_000, 4326) - // Standard Error: 2_378 - .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) + // Measured: `831 + n * (398 Β±0)` + // Estimated: `4326 + n * (2954 Β±0)` + // Minimum execution time: 23_707_000 picoseconds. 
+ Weight::from_parts(24_688_000, 4326) + // Standard Error: 3_813 + .saturating_add(Weight::from_parts(6_422_256, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3605` - // Minimum execution time: 42_120_000 picoseconds. - Weight::from_parts(43_627_000, 3605) + // Estimated: `3812` + // Minimum execution time: 37_882_000 picoseconds. 
+ Weight::from_parts(39_222_000, 3812) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `642` - // Estimated: `3605` - // Minimum execution time: 40_732_000 picoseconds. - Weight::from_parts(42_760_000, 3605) + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 36_629_000 picoseconds. + Weight::from_parts(37_351_000, 3812) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `398` - // Estimated: `3552` - // Minimum execution time: 39_443_000 picoseconds. - Weight::from_parts(40_482_000, 3552) + // Estimated: `3759` + // Minimum execution time: 34_853_000 picoseconds. 
+ Weight::from_parts(35_914_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `509` - // Estimated: `3552` - // Minimum execution time: 37_676_000 picoseconds. - Weight::from_parts(39_527_000, 3552) + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 33_759_000 picoseconds. + Weight::from_parts(34_729_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 20_787_000 picoseconds. - Weight::from_parts(21_315_000, 4326) + // Minimum execution time: 17_583_000 picoseconds. + Weight::from_parts(18_675_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 18_200_000 picoseconds. - Weight::from_parts(19_064_000, 4326) + // Minimum execution time: 15_036_000 picoseconds. 
+ Weight::from_parts(15_995_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 17_128_000 picoseconds. - Weight::from_parts(17_952_000, 4326) + // Minimum execution time: 14_666_000 picoseconds. + Weight::from_parts(15_152_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 14_667_000 picoseconds. - Weight::from_parts(15_262_000, 3517) + // Minimum execution time: 12_393_000 picoseconds. + Weight::from_parts(12_895_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_435_000 picoseconds. - Weight::from_parts(18_775_000, 3549) + // Minimum execution time: 16_034_000 picoseconds. + Weight::from_parts(16_617_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 18_125_000 picoseconds. - Weight::from_parts(18_415_000, 3538) + // Minimum execution time: 15_812_000 picoseconds. 
+ Weight::from_parts(16_644_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 23_237_000 picoseconds. - Weight::from_parts(24_128_000, 4326) + // Minimum execution time: 21_650_000 picoseconds. + Weight::from_parts(22_443_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:1 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) 
+ /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 53_291_000 picoseconds. - Weight::from_parts(54_614_000, 4326) + // Minimum execution time: 49_463_000 picoseconds. + Weight::from_parts(50_937_000, 4326) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -658,681 +662,685 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(4_039_901, 0) - // Standard Error: 10_309 - .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:2 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + // Minimum execution time: 2_029_000 picoseconds. + Weight::from_parts(3_749_829, 0) + // Standard Error: 8_497 + .saturating_add(Weight::from_parts(1_913_514, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn create_swap() -> Weight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 21_011_000 picoseconds. - Weight::from_parts(22_065_000, 7662) + // Minimum execution time: 18_181_000 picoseconds. + Weight::from_parts(18_698_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts PendingSwapOf (r:1 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_swap() -> Weight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 21_423_000 picoseconds. - Weight::from_parts(21_743_000, 4326) + // Minimum execution time: 18_228_000 picoseconds. 
+ Weight::from_parts(18_940_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:2 w:2) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:1 w:2) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:2 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:2 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:4) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:2) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn claim_swap() -> Weight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 86_059_000 picoseconds. - Weight::from_parts(88_401_000, 7662) + // Minimum execution time: 77_983_000 picoseconds. 
+ Weight::from_parts(79_887_000, 7662) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } - /// Storage: Nfts CollectionRoleOf (r:2 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `629` - // Estimated: `6078 + n * (2921 Β±0)` - // Minimum execution time: 146_746_000 picoseconds. - Weight::from_parts(152_885_862, 6078) - // Standard Error: 40_442 - .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) + // Estimated: `6078 + n * (2954 Β±0)` + // Minimum execution time: 126_998_000 picoseconds. 
+ Weight::from_parts(134_149_389, 6078) + // Standard Error: 33_180 + .saturating_add(Weight::from_parts(30_711_206, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `659` - // Estimated: `4326 + n * (2921 Β±0)` - // Minimum execution time: 83_960_000 picoseconds. - Weight::from_parts(98_609_885, 4326) - // Standard Error: 85_991 - .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) + // Estimated: `4326 + n * (2954 Β±0)` + // Minimum execution time: 66_213_000 picoseconds. + Weight::from_parts(81_661_819, 4326) + // Standard Error: 87_003 + .saturating_add(Weight::from_parts(29_550_476, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 40_489_000 picoseconds. - Weight::from_parts(41_320_000, 3549) + // Minimum execution time: 34_035_000 picoseconds. 
+ Weight::from_parts(35_228_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts NextCollectionId (r:1 w:1) - /// Proof: Nfts NextCollectionId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:0 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 23_257_000 picoseconds. - Weight::from_parts(23_770_000, 3549) + // Minimum execution time: 19_430_000 picoseconds. 
+ Weight::from_parts(20_054_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:1) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1000 w:1000) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:0 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:1) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `32220 + a * (332 Β±0)` - // Estimated: `2523990 + a * (2921 Β±0)` - // Minimum execution time: 1_310_198_000 picoseconds. - Weight::from_parts(1_479_261_043, 2523990) - // Standard Error: 4_415 - .saturating_add(Weight::from_parts(6_016_212, 0).saturating_mul(a.into())) + // Measured: `32204 + a * (366 Β±0)` + // Estimated: `2523990 + a * (2954 Β±0)` + // Minimum execution time: 1_249_733_000 picoseconds. 
+ Weight::from_parts(1_293_703_849, 2523990) + // Standard Error: 4_764 + .saturating_add(Weight::from_parts(6_433_523, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(a.into())) - } - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 51_910_000 picoseconds. - Weight::from_parts(53_441_000, 4326) + // Minimum execution time: 48_645_000 picoseconds. 
+ Weight::from_parts(50_287_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn force_mint() -> Weight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 50_168_000 picoseconds. - Weight::from_parts(51_380_000, 4326) + // Minimum execution time: 46_688_000 picoseconds. 
+ Weight::from_parts(47_680_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:0) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:0 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 50_738_000 picoseconds. - Weight::from_parts(51_850_000, 4326) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Minimum execution time: 51_771_000 picoseconds. 
+ Weight::from_parts(53_492_000, 4326) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 41_055_000 picoseconds. - Weight::from_parts(42_336_000, 4326) + // Minimum execution time: 39_166_000 picoseconds. 
+ Weight::from_parts(40_128_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:5000 w:5000) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:5000 w:5000) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 15_688_000 picoseconds. - Weight::from_parts(15_921_000, 3549) - // Standard Error: 14_827 - .saturating_add(Weight::from_parts(17_105_395, 0).saturating_mul(i.into())) + // Minimum execution time: 13_804_000 picoseconds. + Weight::from_parts(14_159_000, 3549) + // Standard Error: 13_812 + .saturating_add(Weight::from_parts(14_661_284, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_676_000, 3534) + // Minimum execution time: 17_485_000 picoseconds.
+ Weight::from_parts(18_412_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn unlock_item_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_911_000 picoseconds. - Weight::from_parts(20_612_000, 3534) + // Minimum execution time: 17_335_000 picoseconds. + Weight::from_parts(18_543_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn lock_collection() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 16_441_000 picoseconds. - Weight::from_parts(16_890_000, 3549) + // Minimum execution time: 14_608_000 picoseconds. + Weight::from_parts(15_696_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_610_000 picoseconds. 
- Weight::from_parts(23_422_000, 3549) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `562` + // Estimated: `3593` + // Minimum execution time: 25_686_000 picoseconds. + Weight::from_parts(26_433_000, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:2 w:4) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 39_739_000 picoseconds. - Weight::from_parts(41_306_000, 6078) + // Minimum execution time: 37_192_000 picoseconds. + Weight::from_parts(38_561_000, 6078) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionAccount (r:0 w:2) - /// Proof: Nfts CollectionAccount (max_values: None, max_size: Some(68), added: 2543, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_collection_owner() -> Weight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 17_685_000 picoseconds. - Weight::from_parts(18_258_000, 3549) + // Minimum execution time: 15_401_000 picoseconds. + Weight::from_parts(15_826_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:0 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn force_collection_config() -> Weight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 13_734_000 picoseconds. - Weight::from_parts(14_337_000, 3549) + // Minimum execution time: 11_683_000 picoseconds. 
+ Weight::from_parts(12_255_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn lock_item_properties() -> Weight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 19_269_000 picoseconds. - Weight::from_parts(19_859_000, 3534) + // Minimum execution time: 16_899_000 picoseconds. + Weight::from_parts(17_404_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3911` - // Minimum execution time: 51_540_000 picoseconds. - Weight::from_parts(52_663_000, 3911) + // Estimated: `3944` + // Minimum execution time: 46_768_000 picoseconds. 
+ Weight::from_parts(47_834_000, 3944) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) fn force_set_attribute() -> Weight { // Proof Size summary in bytes: // Measured: `344` - // Estimated: `3911` - // Minimum execution time: 26_529_000 picoseconds. - Weight::from_parts(27_305_000, 3911) + // Estimated: `3944` + // Minimum execution time: 23_356_000 picoseconds. + Weight::from_parts(24_528_000, 3944) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Attribute (r:1 w:1) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `950` - // Estimated: `3911` - // Minimum execution time: 46_951_000 picoseconds. - Weight::from_parts(48_481_000, 3911) + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 43_061_000 picoseconds. 
+ Weight::from_parts(44_024_000, 3944) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) fn approve_item_attributes() -> Weight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 17_222_000 picoseconds. - Weight::from_parts(17_819_000, 4326) + // Minimum execution time: 14_929_000 picoseconds. + Weight::from_parts(15_344_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1001 w:1000) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn cancel_item_attributes_approval(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `837 + n * (364 ±0)` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 26_185_000 picoseconds. - Weight::from_parts(27_038_000, 4326) - // Standard Error: 2_378 - .saturating_add(Weight::from_parts(6_067_888, 0).saturating_mul(n.into())) + // Measured: `831 + n * (398 ±0)` + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 23_707_000 picoseconds.
+ Weight::from_parts(24_688_000, 4326) + // Standard Error: 3_813 + .saturating_add(Weight::from_parts(6_422_256, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `539` - // Estimated: `3605` - // Minimum execution time: 42_120_000 picoseconds. - Weight::from_parts(43_627_000, 3605) + // Estimated: `3812` + // Minimum execution time: 37_882_000 picoseconds. 
+ Weight::from_parts(39_222_000, 3812) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `642` - // Estimated: `3605` - // Minimum execution time: 40_732_000 picoseconds. - Weight::from_parts(42_760_000, 3605) + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 36_629_000 picoseconds. + Weight::from_parts(37_351_000, 3812) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `398` - // Estimated: `3552` - // Minimum execution time: 39_443_000 picoseconds. - Weight::from_parts(40_482_000, 3552) + // Estimated: `3759` + // Minimum execution time: 34_853_000 picoseconds. 
+ Weight::from_parts(35_914_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts CollectionMetadataOf (r:1 w:1) - /// Proof: Nfts CollectionMetadataOf (max_values: None, max_size: Some(87), added: 2562, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `509` - // Estimated: `3552` - // Minimum execution time: 37_676_000 picoseconds. - Weight::from_parts(39_527_000, 3552) + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 33_759_000 picoseconds. + Weight::from_parts(34_729_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 20_787_000 picoseconds. - Weight::from_parts(21_315_000, 4326) + // Minimum execution time: 17_583_000 picoseconds. + Weight::from_parts(18_675_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 18_200_000 picoseconds. - Weight::from_parts(19_064_000, 4326) + // Minimum execution time: 15_036_000 picoseconds. 
+ Weight::from_parts(15_995_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn clear_all_transfer_approvals() -> Weight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 17_128_000 picoseconds. - Weight::from_parts(17_952_000, 4326) + // Minimum execution time: 14_666_000 picoseconds. + Weight::from_parts(15_152_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts OwnershipAcceptance (r:1 w:1) - /// Proof: Nfts OwnershipAcceptance (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 14_667_000 picoseconds. - Weight::from_parts(15_262_000, 3517) + // Minimum execution time: 12_393_000 picoseconds. + Weight::from_parts(12_895_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 18_435_000 picoseconds. - Weight::from_parts(18_775_000, 3549) + // Minimum execution time: 16_034_000 picoseconds. + Weight::from_parts(16_617_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts CollectionRoleOf (r:1 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:1) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn update_mint_settings() -> Weight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 18_125_000 picoseconds. - Weight::from_parts(18_415_000, 3538) + // Minimum execution time: 15_812_000 picoseconds. 
+ Weight::from_parts(16_644_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 23_237_000 picoseconds. - Weight::from_parts(24_128_000, 4326) + // Minimum execution time: 21_650_000 picoseconds. + Weight::from_parts(22_443_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:1 w:1) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:1 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:2) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` 
(r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 53_291_000 picoseconds. - Weight::from_parts(54_614_000, 4326) + // Minimum execution time: 49_463_000 picoseconds. + Weight::from_parts(50_937_000, 4326) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1341,120 +1349,120 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(4_039_901, 0) - // Standard Error: 10_309 - .saturating_add(Weight::from_parts(3_934_017, 0).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:2 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:0 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) + // Minimum execution time: 2_029_000 picoseconds. + Weight::from_parts(3_749_829, 0) + // Standard Error: 8_497 + .saturating_add(Weight::from_parts(1_913_514, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) fn create_swap() -> Weight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 21_011_000 picoseconds. - Weight::from_parts(22_065_000, 7662) + // Minimum execution time: 18_181_000 picoseconds. + Weight::from_parts(18_698_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts PendingSwapOf (r:1 w:1) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) fn cancel_swap() -> Weight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 21_423_000 picoseconds. - Weight::from_parts(21_743_000, 4326) + // Minimum execution time: 18_228_000 picoseconds. 
+ Weight::from_parts(18_940_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nfts Item (r:2 w:2) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts PendingSwapOf (r:1 w:2) - /// Proof: Nfts PendingSwapOf (max_values: None, max_size: Some(71), added: 2546, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:0) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:2 w:0) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:2 w:0) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:4) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) - /// Storage: Nfts ItemPriceOf (r:0 w:2) - /// Proof: Nfts ItemPriceOf (max_values: None, max_size: Some(89), added: 2564, mode: MaxEncodedLen) + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn claim_swap() -> Weight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 86_059_000 picoseconds. - Weight::from_parts(88_401_000, 7662) + // Minimum execution time: 77_983_000 picoseconds. 
+ Weight::from_parts(79_887_000, 7662) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } - /// Storage: Nfts CollectionRoleOf (r:2 w:0) - /// Proof: Nfts CollectionRoleOf (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Item (r:1 w:1) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts ItemConfigOf (r:1 w:1) - /// Proof: Nfts ItemConfigOf (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: Nfts ItemMetadataOf (r:1 w:1) - /// Proof: Nfts ItemMetadataOf (max_values: None, max_size: Some(140), added: 2615, mode: MaxEncodedLen) - /// Storage: Nfts Account (r:0 w:1) - /// Proof: Nfts Account (max_values: None, max_size: Some(88), added: 2563, mode: MaxEncodedLen) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn mint_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `629` - // Estimated: `6078 + n * (2921 ±0)` - // Minimum execution time: 146_746_000 picoseconds. - Weight::from_parts(152_885_862, 6078) - // Standard Error: 40_442 - .saturating_add(Weight::from_parts(32_887_800, 0).saturating_mul(n.into())) + // Estimated: `6078 + n * (2954 ±0)` + // Minimum execution time: 126_998_000 picoseconds.
+ Weight::from_parts(134_149_389, 6078) + // Standard Error: 33_180 + .saturating_add(Weight::from_parts(30_711_206, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) - } - /// Storage: Nfts Item (r:1 w:0) - /// Proof: Nfts Item (max_values: None, max_size: Some(861), added: 3336, mode: MaxEncodedLen) - /// Storage: Nfts ItemAttributesApprovalsOf (r:1 w:1) - /// Proof: Nfts ItemAttributesApprovalsOf (max_values: None, max_size: Some(681), added: 3156, mode: MaxEncodedLen) - /// Storage: Nfts CollectionConfigOf (r:1 w:0) - /// Proof: Nfts CollectionConfigOf (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) - /// Storage: Nfts Collection (r:1 w:1) - /// Proof: Nfts Collection (max_values: None, max_size: Some(84), added: 2559, mode: MaxEncodedLen) - /// Storage: Nfts Attribute (r:10 w:10) - /// Proof: Nfts Attribute (max_values: None, max_size: Some(446), added: 2921, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(681), added: 3156, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 10]`. fn set_attributes_pre_signed(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `659` - // Estimated: `4326 + n * (2921 ±0)` - // Minimum execution time: 83_960_000 picoseconds. - Weight::from_parts(98_609_885, 4326) - // Standard Error: 85_991 - .saturating_add(Weight::from_parts(32_633_495, 0).saturating_mul(n.into())) + // Estimated: `4326 + n * (2954 ±0)` + // Minimum execution time: 66_213_000 picoseconds.
+ Weight::from_parts(81_661_819, 4326) + // Standard Error: 87_003 + .saturating_add(Weight::from_parts(29_550_476, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2921).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) } } diff --git a/substrate/frame/nis/src/weights.rs b/substrate/frame/nis/src/weights.rs index cba2f004905..63908271391 100644 --- a/substrate/frame/nis/src/weights.rs +++ b/substrate/frame/nis/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_nis +//! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/nis/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/nis/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_nis. +/// Weight functions needed for `pallet_nis`. pub trait WeightInfo { fn place_bid(l: u32, ) -> Weight; fn place_bid_max() -> Weight; @@ -65,367 +64,367 @@ pub trait WeightInfo { fn process_bid() -> Weight; } -/// Weights for pallet_nis using the Substrate node and recommended hardware. +/// Weights for `pallet_nis` using the Substrate node and recommended hardware. 
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
-	/// Storage: Nis Queues (r:1 w:1)
-	/// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
-	/// Storage: Balances Holds (r:1 w:1)
-	/// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen)
-	/// Storage: Nis QueueTotals (r:1 w:1)
-	/// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+	/// Storage: `Nis::Queues` (r:1 w:1)
+	/// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`)
+	/// Storage: `Nis::QueueTotals` (r:1 w:1)
+	/// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 	/// The range of component `l` is `[0, 999]`.
 	fn place_bid(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `6176 + l * (48 ±0)`
 		// Estimated: `51487`
-		// Minimum execution time: 49_410_000 picoseconds.
-		Weight::from_parts(57_832_282, 51487)
-			// Standard Error: 288
-			.saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into()))
+		// Minimum execution time: 47_908_000 picoseconds.
+		Weight::from_parts(50_096_676, 51487)
+			// Standard Error: 208
+			.saturating_add(Weight::from_parts(41_318, 0).saturating_mul(l.into()))
 			.saturating_add(T::DbWeight::get().reads(3_u64))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 	}
-	/// Storage: Nis Queues (r:1 w:1)
-	/// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
-	/// Storage: Balances Holds (r:1 w:1)
-	/// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen)
-	/// Storage: Nis QueueTotals (r:1 w:1)
-	/// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+	/// Storage: `Nis::Queues` (r:1 w:1)
+	/// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`)
+	/// Storage: `Nis::QueueTotals` (r:1 w:1)
+	/// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 	fn place_bid_max() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `54178`
 		// Estimated: `51487`
-		// Minimum execution time: 119_696_000 picoseconds.
-		Weight::from_parts(121_838_000, 51487)
+		// Minimum execution time: 100_836_000 picoseconds.
+ Weight::from_parts(102_497_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 Β±0)` // Estimated: `51487` - // Minimum execution time: 50_843_000 picoseconds. - Weight::from_parts(54_237_365, 51487) - // Standard Error: 243 - .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) + // Minimum execution time: 45_830_000 picoseconds. + Weight::from_parts(46_667_676, 51487) + // Standard Error: 130 + .saturating_add(Weight::from_parts(33_007, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 40_752_000 picoseconds. - Weight::from_parts(41_899_000, 3593) + // Minimum execution time: 30_440_000 picoseconds. 
+ Weight::from_parts(31_240_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `3675` - // Minimum execution time: 79_779_000 picoseconds. - Weight::from_parts(82_478_000, 3675) + // Minimum execution time: 71_017_000 picoseconds. 
+ Weight::from_parts(72_504_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `3675` - // Minimum execution time: 99_588_000 picoseconds. - Weight::from_parts(102_340_000, 3675) + // Minimum execution time: 89_138_000 picoseconds. 
+ Weight::from_parts(91_290_000, 3675) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `354` - // Estimated: `3593` - // Minimum execution time: 53_094_000 picoseconds. - Weight::from_parts(54_543_000, 3593) + // Estimated: `3658` + // Minimum execution time: 47_917_000 picoseconds. + Weight::from_parts(49_121_000, 3658) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 107_248_000 picoseconds. - Weight::from_parts(109_923_000, 3675) + // Minimum execution time: 91_320_000 picoseconds. 
+ Weight::from_parts(93_080_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 27_169_000 picoseconds. - Weight::from_parts(29_201_000, 7487) + // Minimum execution time: 20_117_000 picoseconds. + Weight::from_parts(20_829_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_540_000 picoseconds. - Weight::from_parts(4_699_000, 51487) + // Minimum execution time: 4_460_000 picoseconds. + Weight::from_parts(4_797_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_336_000, 0) + // Minimum execution time: 4_609_000 picoseconds. + Weight::from_parts(4_834_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
 impl WeightInfo for () {
-	/// Storage: Nis Queues (r:1 w:1)
-	/// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
-	/// Storage: Balances Holds (r:1 w:1)
-	/// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen)
-	/// Storage: Nis QueueTotals (r:1 w:1)
-	/// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+	/// Storage: `Nis::Queues` (r:1 w:1)
+	/// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`)
+	/// Storage: `Nis::QueueTotals` (r:1 w:1)
+	/// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 	/// The range of component `l` is `[0, 999]`.
 	fn place_bid(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `6176 + l * (48 ±0)`
 		// Estimated: `51487`
-		// Minimum execution time: 49_410_000 picoseconds.
-		Weight::from_parts(57_832_282, 51487)
-			// Standard Error: 288
-			.saturating_add(Weight::from_parts(51_621, 0).saturating_mul(l.into()))
+		// Minimum execution time: 47_908_000 picoseconds.
+		Weight::from_parts(50_096_676, 51487)
+			// Standard Error: 208
+			.saturating_add(Weight::from_parts(41_318, 0).saturating_mul(l.into()))
 			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().writes(3_u64))
 	}
-	/// Storage: Nis Queues (r:1 w:1)
-	/// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen)
-	/// Storage: Balances Holds (r:1 w:1)
-	/// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen)
-	/// Storage: Nis QueueTotals (r:1 w:1)
-	/// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen)
+	/// Storage: `Nis::Queues` (r:1 w:1)
+	/// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`)
+	/// Storage: `Nis::QueueTotals` (r:1 w:1)
+	/// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`)
 	fn place_bid_max() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `54178`
 		// Estimated: `51487`
-		// Minimum execution time: 119_696_000 picoseconds.
-		Weight::from_parts(121_838_000, 51487)
+		// Minimum execution time: 100_836_000 picoseconds.
+ Weight::from_parts(102_497_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6176 + l * (48 Β±0)` // Estimated: `51487` - // Minimum execution time: 50_843_000 picoseconds. - Weight::from_parts(54_237_365, 51487) - // Standard Error: 243 - .saturating_add(Weight::from_parts(67_732, 0).saturating_mul(l.into())) + // Minimum execution time: 45_830_000 picoseconds. + Weight::from_parts(46_667_676, 51487) + // Standard Error: 130 + .saturating_add(Weight::from_parts(33_007, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `191` // Estimated: `3593` - // Minimum execution time: 40_752_000 picoseconds. - Weight::from_parts(41_899_000, 3593) + // Minimum execution time: 30_440_000 picoseconds. 
+ Weight::from_parts(31_240_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `3675` - // Minimum execution time: 79_779_000 picoseconds. - Weight::from_parts(82_478_000, 3675) + // Minimum execution time: 71_017_000 picoseconds. 
+ Weight::from_parts(72_504_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `3675` - // Minimum execution time: 99_588_000 picoseconds. - Weight::from_parts(102_340_000, 3675) + // Minimum execution time: 89_138_000 picoseconds. 
+ Weight::from_parts(91_290_000, 3675) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(85), added: 2560, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `354` - // Estimated: `3593` - // Minimum execution time: 53_094_000 picoseconds. - Weight::from_parts(54_543_000, 3593) + // Estimated: `3658` + // Minimum execution time: 47_917_000 picoseconds. + Weight::from_parts(49_121_000, 3658) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:1 w:1) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: // Measured: `773` // Estimated: `3675` - // Minimum execution time: 107_248_000 picoseconds. - Weight::from_parts(109_923_000, 3675) + // Minimum execution time: 91_320_000 picoseconds. 
+ Weight::from_parts(93_080_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6624` // Estimated: `7487` - // Minimum execution time: 27_169_000 picoseconds. - Weight::from_parts(29_201_000, 7487) + // Minimum execution time: 20_117_000 picoseconds. + Weight::from_parts(20_829_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `51487` - // Minimum execution time: 4_540_000 picoseconds. - Weight::from_parts(4_699_000, 51487) + // Minimum execution time: 4_460_000 picoseconds. + Weight::from_parts(4_797_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_336_000, 0) + // Minimum execution time: 4_609_000 picoseconds. + Weight::from_parts(4_834_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 047a17c3f9a..2749af7937f 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_nomination_pools` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_nomination_pools +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_nomination_pools -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/nomination-pools/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -112,8 +114,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 181_861_000 picoseconds. + Weight::from_parts(186_375_000, 8877) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -145,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 182_273_000 picoseconds. + Weight::from_parts(186_635_000, 8877) .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -180,8 +182,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 217_878_000 picoseconds. + Weight::from_parts(221_493_000, 8877) .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -201,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 74_509_000 picoseconds. + Weight::from_parts(76_683_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -242,8 +244,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_424_000 picoseconds. 
+ Weight::from_parts(169_698_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -259,18 +261,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1817` + // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 66_110_000 picoseconds. + Weight::from_parts(68_620_141, 4764) + // Standard Error: 1_379 + .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -291,6 +295,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) @@ -300,13 +306,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2207` + // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Minimum execution time: 125_669_000 picoseconds. 
+ Weight::from_parts(130_907_641, 27847) + // Standard Error: 2_490 + .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -333,12 +339,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -360,8 +366,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 225_700_000 picoseconds. + Weight::from_parts(234_390_990, 27847) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20_u64)) } @@ -413,8 +419,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 167_171_000 picoseconds. + Weight::from_parts(170_531_000, 8538) .saturating_add(T::DbWeight::get().reads(23_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } @@ -447,10 +453,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1808` // Estimated: `4556 + n * (2520 Β±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) + // Minimum execution time: 63_785_000 picoseconds. + Weight::from_parts(64_592_302, 4556) + // Standard Error: 9_416 + .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -466,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_096_000 picoseconds. 
+ Weight::from_parts(33_533_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -482,10 +488,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_914_000 picoseconds. + Weight::from_parts(14_800_402, 3735) + // Standard Error: 146 + .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -505,8 +511,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_373_000 picoseconds. + Weight::from_parts(4_592_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -515,8 +521,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_662_000 picoseconds. + Weight::from_parts(17_531_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -542,8 +548,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) + // Minimum execution time: 61_348_000 picoseconds. + Weight::from_parts(63_712_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -559,8 +565,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_232_000 picoseconds. + Weight::from_parts(33_433_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -572,8 +578,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_774_000 picoseconds. + Weight::from_parts(17_671_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -583,8 +589,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_724_000 picoseconds. + Weight::from_parts(17_181_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -594,8 +600,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_362_000 picoseconds. 
+ Weight::from_parts(17_135_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -607,8 +613,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_125_000 picoseconds. + Weight::from_parts(14_705_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -624,8 +630,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 61_699_000 picoseconds. + Weight::from_parts(63_605_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -641,8 +647,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_930_000 picoseconds. + Weight::from_parts(66_068_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -686,8 +692,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 184_295_000 picoseconds. - Weight::from_parts(188_860_000, 8877) + // Minimum execution time: 181_861_000 picoseconds. + Weight::from_parts(186_375_000, 8877) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -719,8 +725,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 188_777_000 picoseconds. - Weight::from_parts(192_646_000, 8877) + // Minimum execution time: 182_273_000 picoseconds. + Weight::from_parts(186_635_000, 8877) .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -754,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 221_728_000 picoseconds. - Weight::from_parts(227_569_000, 8877) + // Minimum execution time: 217_878_000 picoseconds. + Weight::from_parts(221_493_000, 8877) .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } @@ -775,8 +781,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 75_310_000 picoseconds. - Weight::from_parts(77_709_000, 3719) + // Minimum execution time: 74_509_000 picoseconds. + Weight::from_parts(76_683_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -816,8 +822,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 170_656_000 picoseconds. - Weight::from_parts(174_950_000, 27847) + // Minimum execution time: 166_424_000 picoseconds. 
+ Weight::from_parts(169_698_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -833,18 +839,20 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1817` + // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 68_866_000 picoseconds. - Weight::from_parts(72_312_887, 4764) - // Standard Error: 1_635 - .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 66_110_000 picoseconds. + Weight::from_parts(68_620_141, 4764) + // Standard Error: 1_379 + .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -865,6 +873,8 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) @@ -874,13 +884,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2207` + // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 131_383_000 picoseconds. - Weight::from_parts(136_595_971, 27847) - // Standard Error: 2_715 - .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Minimum execution time: 125_669_000 picoseconds. 
+ Weight::from_parts(130_907_641, 27847) + // Standard Error: 2_490 + .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -907,12 +917,12 @@ impl WeightInfo for () { /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -934,8 +944,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 233_314_000 picoseconds. - Weight::from_parts(241_694_316, 27847) + // Minimum execution time: 225_700_000 picoseconds. + Weight::from_parts(234_390_990, 27847) .saturating_add(RocksDbWeight::get().reads(24_u64)) .saturating_add(RocksDbWeight::get().writes(20_u64)) } @@ -987,8 +997,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 171_465_000 picoseconds. - Weight::from_parts(176_478_000, 8538) + // Minimum execution time: 167_171_000 picoseconds. + Weight::from_parts(170_531_000, 8538) .saturating_add(RocksDbWeight::get().reads(23_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } @@ -1021,10 +1031,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1808` // Estimated: `4556 + n * (2520 Β±0)` - // Minimum execution time: 63_588_000 picoseconds. - Weight::from_parts(64_930_584, 4556) - // Standard Error: 9_167 - .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) + // Minimum execution time: 63_785_000 picoseconds. + Weight::from_parts(64_592_302, 4556) + // Standard Error: 9_416 + .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -1040,8 +1050,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_899_000 picoseconds. - Weight::from_parts(33_955_000, 4556) + // Minimum execution time: 32_096_000 picoseconds. 
+ Weight::from_parts(33_533_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1056,10 +1066,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_778_000 picoseconds. - Weight::from_parts(14_770_006, 3735) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) + // Minimum execution time: 13_914_000 picoseconds. + Weight::from_parts(14_800_402, 3735) + // Standard Error: 146 + .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1079,8 +1089,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_550_000 picoseconds. - Weight::from_parts(4_935_000, 0) + // Minimum execution time: 4_373_000 picoseconds. + Weight::from_parts(4_592_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1089,8 +1099,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_346_000, 3719) + // Minimum execution time: 16_662_000 picoseconds. + Weight::from_parts(17_531_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1116,8 +1126,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 61_970_000 picoseconds. - Weight::from_parts(63_738_000, 4556) + // Minimum execution time: 61_348_000 picoseconds. + Weight::from_parts(63_712_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1133,8 +1143,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 31_950_000 picoseconds. - Weight::from_parts(33_190_000, 3719) + // Minimum execution time: 32_232_000 picoseconds. + Weight::from_parts(33_433_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1146,8 +1156,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_807_000 picoseconds. - Weight::from_parts(17_733_000, 3719) + // Minimum execution time: 16_774_000 picoseconds. + Weight::from_parts(17_671_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1157,8 +1167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_710_000 picoseconds. - Weight::from_parts(17_563_000, 3719) + // Minimum execution time: 16_724_000 picoseconds. + Weight::from_parts(17_181_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1168,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_493_000 picoseconds. - Weight::from_parts(17_022_000, 3719) + // Minimum execution time: 16_362_000 picoseconds. 
+ Weight::from_parts(17_135_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1181,8 +1191,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_248_000 picoseconds. - Weight::from_parts(15_095_000, 3702) + // Minimum execution time: 14_125_000 picoseconds. + Weight::from_parts(14_705_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1198,8 +1208,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_969_000 picoseconds. - Weight::from_parts(63_965_000, 3719) + // Minimum execution time: 61_699_000 picoseconds. + Weight::from_parts(63_605_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1215,8 +1225,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 65_462_000 picoseconds. - Weight::from_parts(67_250_000, 4764) + // Minimum execution time: 64_930_000 picoseconds. + Weight::from_parts(66_068_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 01ad8d64f10..b3260f0841f 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -148,7 +148,7 @@ parameter_types! { pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { diff --git a/substrate/frame/parameters/src/weights.rs b/substrate/frame/parameters/src/weights.rs index 6746960b1b7..340eb9e31b7 100644 --- a/substrate/frame/parameters/src/weights.rs +++ b/substrate/frame/parameters/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_parameters` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_parameters +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_parameters -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/parameters/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -55,22 +57,30 @@ pub trait WeightInfo { /// Weights for `pallet_parameters` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn set_parameter() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + // Measured: `3` + // Estimated: `3501` + // Minimum execution time: 8_400_000 picoseconds. + Weight::from_parts(8_682_000, 3501) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } // For backwards compatibility and tests. impl WeightInfo for () { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) fn set_parameter() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + // Measured: `3` + // Estimated: `3501` + // Minimum execution time: 8_400_000 picoseconds. + Weight::from_parts(8_682_000, 3501) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index c11ab74c1e5..6167cd53029 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_preimage` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-mia4uyug-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_preimage -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/preimage/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,200 +72,209 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. - Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `112` + // Estimated: `6012` + // Minimum execution time: 48_893_000 picoseconds. + Weight::from_parts(44_072_327, 6012) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_684, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. - Weight::from_parts(17_031_000, 3556) + // Minimum execution time: 15_675_000 picoseconds. 
+ Weight::from_parts(4_564_145, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_678, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_335_000, 3556) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_687, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. - Weight::from_parts(34_099_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 47_378_000 picoseconds. 
+ Weight::from_parts(48_776_000, 3658) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) + // Minimum execution time: 20_939_000 picoseconds. + Weight::from_parts(21_577_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. - Weight::from_parts(30_303_000, 3556) + // Minimum execution time: 17_945_000 picoseconds. + Weight::from_parts(18_448_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) + // Minimum execution time: 12_132_000 picoseconds. 
+ Weight::from_parts(12_710_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) + // Minimum execution time: 13_014_000 picoseconds. + Weight::from_parts(13_726_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. - Weight::from_parts(11_571_000, 3556) + // Minimum execution time: 9_785_000 picoseconds. + Weight::from_parts(10_266_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) + // Minimum execution time: 18_764_000 picoseconds. 
+ Weight::from_parts(19_635_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(11_937_000, 3556) + // Minimum execution time: 9_624_000 picoseconds. + Weight::from_parts(10_044_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) + // Minimum execution time: 9_432_000 picoseconds. + Weight::from_parts(9_991_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:1023 w:1023) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + n * (91 Β±0)` - // Estimated: `3593 + n * (2566 Β±0)` - // Minimum execution time: 2_452_000 picoseconds. 
- Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + // Measured: `0 + n * (227 Β±0)` + // Estimated: `6012 + n * (2668 Β±0)` + // Minimum execution time: 54_056_000 picoseconds. + Weight::from_parts(54_912_000, 6012) + // Standard Error: 42_469 + .saturating_add(Weight::from_parts(50_710_258, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } @@ -272,199 +283,208 @@ impl WeightInfo for () { /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3556` - // Minimum execution time: 15_936_000 picoseconds. - Weight::from_parts(16_271_000, 3556) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_916, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `112` + // Estimated: `6012` + // Minimum execution time: 48_893_000 picoseconds. + Weight::from_parts(44_072_327, 6012) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_684, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. 
fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_468_000 picoseconds. - Weight::from_parts(17_031_000, 3556) + // Minimum execution time: 15_675_000 picoseconds. + Weight::from_parts(4_564_145, 3556) // Standard Error: 2 - .saturating_add(Weight::from_parts(1_948, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_678, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 16_342_000 picoseconds. - Weight::from_parts(16_535_000, 3556) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_335_000, 3556) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_906, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(1_687, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `3556` - // Minimum execution time: 31_047_000 picoseconds. - Weight::from_parts(34_099_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Measured: `311` + // Estimated: `3658` + // Minimum execution time: 47_378_000 picoseconds. 
+ Weight::from_parts(48_776_000, 3658) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 32_559_000 picoseconds. - Weight::from_parts(36_677_000, 3556) + // Minimum execution time: 20_939_000 picoseconds. + Weight::from_parts(21_577_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `172` + // Measured: `255` // Estimated: `3556` - // Minimum execution time: 27_887_000 picoseconds. - Weight::from_parts(30_303_000, 3556) + // Minimum execution time: 17_945_000 picoseconds. + Weight::from_parts(18_448_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 17_256_000 picoseconds. - Weight::from_parts(19_481_000, 3556) + // Minimum execution time: 12_132_000 picoseconds. 
+ Weight::from_parts(12_710_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `109` // Estimated: `3556` - // Minimum execution time: 22_344_000 picoseconds. - Weight::from_parts(23_868_000, 3556) + // Minimum execution time: 13_014_000 picoseconds. + Weight::from_parts(13_726_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_542_000 picoseconds. - Weight::from_parts(11_571_000, 3556) + // Minimum execution time: 9_785_000 picoseconds. + Weight::from_parts(10_266_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `144` + // Measured: `211` // Estimated: `3556` - // Minimum execution time: 29_054_000 picoseconds. - Weight::from_parts(32_996_000, 3556) + // Minimum execution time: 18_764_000 picoseconds. 
+ Weight::from_parts(19_635_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(11_937_000, 3556) + // Minimum execution time: 9_624_000 picoseconds. + Weight::from_parts(10_044_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `173` // Estimated: `3556` - // Minimum execution time: 10_696_000 picoseconds. - Weight::from_parts(11_717_000, 3556) + // Minimum execution time: 9_432_000 picoseconds. + Weight::from_parts(9_991_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Preimage::StatusFor` (r:1024 w:1024) + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:1023 w:1023) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Preimage::RequestStatusFor` (r:0 w:1024) - /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(75), added: 2550, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1024]`. + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + n * (91 Β±0)` - // Estimated: `3593 + n * (2566 Β±0)` - // Minimum execution time: 2_452_000 picoseconds. 
- Weight::from_parts(2_641_000, 3593) - // Standard Error: 19_797 - .saturating_add(Weight::from_parts(15_620_946, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + // Measured: `0 + n * (227 Β±0)` + // Estimated: `6012 + n * (2668 Β±0)` + // Minimum execution time: 54_056_000 picoseconds. + Weight::from_parts(54_912_000, 6012) + // Standard Error: 42_469 + .saturating_add(Weight::from_parts(50_710_258, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) } } diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs index f30fe73d27a..3c37c91d500 100644 --- a/substrate/frame/proxy/src/weights.rs +++ b/substrate/frame/proxy/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_proxy +//! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/proxy/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/proxy/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_proxy. +/// Weight functions needed for `pallet_proxy`. pub trait WeightInfo { fn proxy(p: u32, ) -> Weight; fn proxy_announced(a: u32, p: u32, ) -> Weight; @@ -64,336 +63,352 @@ pub trait WeightInfo { fn kill_pure(p: u32, ) -> Weight; } -/// Weights for pallet_proxy using the Substrate node and recommended hardware. +/// Weights for `pallet_proxy` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 Β±0)` + // Measured: `306 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 15_182_000 picoseconds. - Weight::from_parts(15_919_146, 4706) - // Standard Error: 1_586 - .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 18_437_000 picoseconds. + Weight::from_parts(19_610_577, 4706) + // Standard Error: 2_531 + .saturating_add(Weight::from_parts(26_001, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `488 + a * (68 Β±0) + p * (37 Β±0)` + // Measured: `633 + a * (68 Β±0) + p * (37 Β±0)` // Estimated: `5698` - // Minimum execution time: 40_256_000 picoseconds. - Weight::from_parts(40_373_648, 5698) - // Standard Error: 3_978 - .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) - // Standard Error: 4_110 - .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Minimum execution time: 40_426_000 picoseconds. 
+ Weight::from_parts(40_200_295, 5698) + // Standard Error: 2_922 + .saturating_add(Weight::from_parts(161_885, 0).saturating_mul(a.into())) + // Standard Error: 3_019 + .saturating_add(Weight::from_parts(69_710, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 Β±0)` // Estimated: `5698` - // Minimum execution time: 25_040_000 picoseconds. - Weight::from_parts(25_112_188, 5698) - // Standard Error: 2_143 - .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) + // Minimum execution time: 21_905_000 picoseconds. + Weight::from_parts(22_717_430, 5698) + // Standard Error: 2_004 + .saturating_add(Weight::from_parts(153_390, 0).saturating_mul(a.into())) + // Standard Error: 2_071 + .saturating_add(Weight::from_parts(5_676, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 Β±0)` // Estimated: `5698` - // Minimum execution time: 24_884_000 picoseconds. - Weight::from_parts(25_359_291, 5698) - // Standard Error: 2_019 - .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) - // Standard Error: 2_086 - .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) + // Minimum execution time: 21_974_000 picoseconds. 
+ Weight::from_parts(22_484_324, 5698) + // Standard Error: 1_846 + .saturating_add(Weight::from_parts(153_904, 0).saturating_mul(a.into())) + // Standard Error: 1_907 + .saturating_add(Weight::from_parts(9_616, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `420 + a * (68 Β±0) + p * (37 Β±0)` // Estimated: `5698` - // Minimum execution time: 35_039_000 picoseconds. - Weight::from_parts(36_727_868, 5698) - // Standard Error: 4_463 - .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) - // Standard Error: 4_611 - .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(32_128_158, 5698) + // Standard Error: 3_778 + .saturating_add(Weight::from_parts(137_366, 0).saturating_mul(a.into())) + // Standard Error: 3_904 + .saturating_add(Weight::from_parts(53_040, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 25_697_000 picoseconds. - Weight::from_parts(26_611_090, 4706) - // Standard Error: 2_306 - .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) + // Minimum execution time: 21_391_000 picoseconds. + Weight::from_parts(22_202_614, 4706) + // Standard Error: 1_750 + .saturating_add(Weight::from_parts(49_639, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. 
fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 25_638_000 picoseconds. - Weight::from_parts(26_904_510, 4706) - // Standard Error: 2_669 - .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) + // Minimum execution time: 21_375_000 picoseconds. + Weight::from_parts(22_392_601, 4706) + // Standard Error: 2_415 + .saturating_add(Weight::from_parts(40_345, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 22_737_000 picoseconds. - Weight::from_parts(23_618_441, 4706) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) + // Minimum execution time: 19_833_000 picoseconds. + Weight::from_parts(20_839_747, 4706) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(40_874, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 27_364_000 picoseconds. - Weight::from_parts(28_632_271, 4706) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) + // Minimum execution time: 22_231_000 picoseconds. + Weight::from_parts(23_370_995, 4706) + // Standard Error: 1_521 + .saturating_add(Weight::from_parts(4_892, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `198 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 23_552_000 picoseconds. - Weight::from_parts(24_874_553, 4706) - // Standard Error: 1_919 - .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) + // Minimum execution time: 20_614_000 picoseconds. + Weight::from_parts(21_845_970, 4706) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(34_480, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 Β±0)` + // Measured: `306 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 15_182_000 picoseconds. - Weight::from_parts(15_919_146, 4706) - // Standard Error: 1_586 - .saturating_add(Weight::from_parts(31_768, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 18_437_000 picoseconds. + Weight::from_parts(19_610_577, 4706) + // Standard Error: 2_531 + .saturating_add(Weight::from_parts(26_001, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `488 + a * (68 Β±0) + p * (37 Β±0)` + // Measured: `633 + a * (68 Β±0) + p * (37 Β±0)` // Estimated: `5698` - // Minimum execution time: 40_256_000 picoseconds. - Weight::from_parts(40_373_648, 5698) - // Standard Error: 3_978 - .saturating_add(Weight::from_parts(166_936, 0).saturating_mul(a.into())) - // Standard Error: 4_110 - .saturating_add(Weight::from_parts(54_329, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Minimum execution time: 40_426_000 picoseconds. 
+ Weight::from_parts(40_200_295, 5698) + // Standard Error: 2_922 + .saturating_add(Weight::from_parts(161_885, 0).saturating_mul(a.into())) + // Standard Error: 3_019 + .saturating_add(Weight::from_parts(69_710, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 Β±0)` // Estimated: `5698` - // Minimum execution time: 25_040_000 picoseconds. - Weight::from_parts(25_112_188, 5698) - // Standard Error: 2_143 - .saturating_add(Weight::from_parts(189_027, 0).saturating_mul(a.into())) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(26_683, 0).saturating_mul(p.into())) + // Minimum execution time: 21_905_000 picoseconds. + Weight::from_parts(22_717_430, 5698) + // Standard Error: 2_004 + .saturating_add(Weight::from_parts(153_390, 0).saturating_mul(a.into())) + // Standard Error: 2_071 + .saturating_add(Weight::from_parts(5_676, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `403 + a * (68 Β±0)` // Estimated: `5698` - // Minimum execution time: 24_884_000 picoseconds. - Weight::from_parts(25_359_291, 5698) - // Standard Error: 2_019 - .saturating_add(Weight::from_parts(181_470, 0).saturating_mul(a.into())) - // Standard Error: 2_086 - .saturating_add(Weight::from_parts(17_725, 0).saturating_mul(p.into())) + // Minimum execution time: 21_974_000 picoseconds. 
+ Weight::from_parts(22_484_324, 5698) + // Standard Error: 1_846 + .saturating_add(Weight::from_parts(153_904, 0).saturating_mul(a.into())) + // Standard Error: 1_907 + .saturating_add(Weight::from_parts(9_616, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `420 + a * (68 Β±0) + p * (37 Β±0)` // Estimated: `5698` - // Minimum execution time: 35_039_000 picoseconds. - Weight::from_parts(36_727_868, 5698) - // Standard Error: 4_463 - .saturating_add(Weight::from_parts(167_060, 0).saturating_mul(a.into())) - // Standard Error: 4_611 - .saturating_add(Weight::from_parts(59_836, 0).saturating_mul(p.into())) + // Minimum execution time: 30_454_000 picoseconds. + Weight::from_parts(32_128_158, 5698) + // Standard Error: 3_778 + .saturating_add(Weight::from_parts(137_366, 0).saturating_mul(a.into())) + // Standard Error: 3_904 + .saturating_add(Weight::from_parts(53_040, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 25_697_000 picoseconds. - Weight::from_parts(26_611_090, 4706) - // Standard Error: 2_306 - .saturating_add(Weight::from_parts(85_165, 0).saturating_mul(p.into())) + // Minimum execution time: 21_391_000 picoseconds. + Weight::from_parts(22_202_614, 4706) + // Standard Error: 1_750 + .saturating_add(Weight::from_parts(49_639, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. 
fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 25_638_000 picoseconds. - Weight::from_parts(26_904_510, 4706) - // Standard Error: 2_669 - .saturating_add(Weight::from_parts(61_668, 0).saturating_mul(p.into())) + // Minimum execution time: 21_375_000 picoseconds. + Weight::from_parts(22_392_601, 4706) + // Standard Error: 2_415 + .saturating_add(Weight::from_parts(40_345, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `161 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 22_737_000 picoseconds. - Weight::from_parts(23_618_441, 4706) - // Standard Error: 1_729 - .saturating_add(Weight::from_parts(44_009, 0).saturating_mul(p.into())) + // Minimum execution time: 19_833_000 picoseconds. + Weight::from_parts(20_839_747, 4706) + // Standard Error: 1_742 + .saturating_add(Weight::from_parts(40_874, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `173` // Estimated: `4706` - // Minimum execution time: 27_364_000 picoseconds. - Weight::from_parts(28_632_271, 4706) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(2_453, 0).saturating_mul(p.into())) + // Minimum execution time: 22_231_000 picoseconds. + Weight::from_parts(23_370_995, 4706) + // Standard Error: 1_521 + .saturating_add(Weight::from_parts(4_892, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `198 + p * (37 Β±0)` // Estimated: `4706` - // Minimum execution time: 23_552_000 picoseconds. - Weight::from_parts(24_874_553, 4706) - // Standard Error: 1_919 - .saturating_add(Weight::from_parts(38_799, 0).saturating_mul(p.into())) + // Minimum execution time: 20_614_000 picoseconds. 
+ Weight::from_parts(21_845_970, 4706) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(34_480, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/ranked-collective/src/weights.rs b/substrate/frame/ranked-collective/src/weights.rs index 4ff0c3337d5..5721858f7e2 100644 --- a/substrate/frame/ranked-collective/src/weights.rs +++ b/substrate/frame/ranked-collective/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_ranked_collective +//! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/ranked-collective/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/ranked-collective/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_ranked_collective. +/// Weight functions needed for `pallet_ranked_collective`. pub trait WeightInfo { fn add_member() -> Weight; fn remove_member(r: u32, ) -> Weight; @@ -61,283 +60,295 @@ pub trait WeightInfo { fn exchange_member() -> Weight; } -/// Weights for pallet_ranked_collective using the Substrate node and recommended hardware. +/// Weights for `pallet_ranked_collective` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 17_245_000 picoseconds. - Weight::from_parts(17_930_000, 3507) + // Minimum execution time: 15_389_000 picoseconds. + Weight::from_parts(15_901_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:11 w:11) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:11 w:11) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:11 w:11) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:11 w:11) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:11 w:22) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:11 w:22) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `616 + r * (281 Β±0)` // Estimated: `3519 + r * (2529 Β±0)` - // Minimum execution time: 29_534_000 picoseconds. 
- Weight::from_parts(32_847_495, 3519) - // Standard Error: 24_211 - .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) + // Minimum execution time: 29_541_000 picoseconds. + Weight::from_parts(32_239_358, 3519) + // Standard Error: 23_406 + .saturating_add(Weight::from_parts(16_030_191, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `314 + r * (17 Β±0)` // Estimated: `3507` - // Minimum execution time: 20_333_000 picoseconds. - Weight::from_parts(21_592_224, 3507) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) + // Minimum execution time: 17_939_000 picoseconds. 
+ Weight::from_parts(19_290_416, 3507) + // Standard Error: 5_710 + .saturating_add(Weight::from_parts(374_399, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:1 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:2) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:1 w:2) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `632 + r * (72 Β±0)` // Estimated: `3519` - // Minimum execution time: 29_446_000 picoseconds. - Weight::from_parts(32_447_715, 3519) - // Standard Error: 28_791 - .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) + // Minimum execution time: 29_609_000 picoseconds. 
+ Weight::from_parts(32_686_167, 3519) + // Standard Error: 27_588 + .saturating_add(Weight::from_parts(789_212, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:1) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:1 w:1) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:1) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:1 w:1) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 45_474_000 picoseconds. - Weight::from_parts(47_228_000, 219984) + // Minimum execution time: 41_151_000 picoseconds. + Weight::from_parts(42_435_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective VotingCleanup (r:1 w:0) - /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:100 w:100) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:0) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::VotingCleanup` (r:1 w:0) + /// Proof: `RankedCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:100 w:100) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `462 + n * (50 Β±0)` // Estimated: `3795 + n * (2540 Β±0)` - // Minimum execution time: 13_903_000 picoseconds. 
- Weight::from_parts(18_209_102, 3795) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) + // Minimum execution time: 13_563_000 picoseconds. + Weight::from_parts(17_495_215, 3795) + // Standard Error: 2_294 + .saturating_add(Weight::from_parts(1_207_393, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } - /// Storage: `RankedCollective::Members` (r:2 w:2) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:2 w:2) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `RankedCollective::IdToIndex` (r:2 w:2) + /// Storage: `RankedCollective::IdToIndex` (r:2 w:4) /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:2 w:2) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:0) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:2 w:2) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IndexToId` (r:0 w:2) /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn exchange_member() -> Weight { // Proof Size summary in bytes: - // Measured: `437` - // Estimated: `6048` - // Minimum execution time: 138_000_000 picoseconds. - Weight::from_parts(141_000_000, 0) - .saturating_add(Weight::from_parts(0, 6048)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(8)) + // Measured: `625` + // Estimated: `19894` + // Minimum execution time: 70_713_000 picoseconds. + Weight::from_parts(72_831_000, 19894) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().writes(14_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
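The `pallet_ranked_collective` entries above add one more ingredient: a component can also scale the number of database operations and the proof size, not only the execution time. The sketch below shows how the regenerated `remove_member` formula composes these pieces, again with assumed placeholder DB costs standing in for `T::DbWeight::get()`; the unit `()` implementation that follows mirrors the same values over `RocksDbWeight`.

// Minimal sketch with assumed DB costs; mirrors the regenerated `remove_member`:
// base 32_239_358 ps + 16_030_191 ps per rank `r`, 4 + 3*r reads, 6 + 5*r writes,
// and a proof size of 3_519 + 2_529 * r bytes.
const ASSUMED_READ_PS: u64 = 25_000_000; // placeholder per-read cost in picoseconds
const ASSUMED_WRITE_PS: u64 = 100_000_000; // placeholder per-write cost in picoseconds

/// Returns (ref_time in picoseconds, proof_size in bytes) for a given rank count `r`.
fn remove_member_weight(r: u64) -> (u64, u64) {
    let reads = 4 + 3 * r;
    let writes = 6 + 5 * r;
    let ref_time = 32_239_358u64
        .saturating_add(16_030_191u64.saturating_mul(r))
        .saturating_add(ASSUMED_READ_PS.saturating_mul(reads))
        .saturating_add(ASSUMED_WRITE_PS.saturating_mul(writes));
    let proof_size = 3_519u64.saturating_add(2_529u64.saturating_mul(r));
    (ref_time, proof_size)
}

fn main() {
    // The benchmarked range of `r` is `[0, 10]`.
    for r in [0u64, 5, 10] {
        let (ref_time, proof_size) = remove_member_weight(r);
        println!("r = {r:>2}: ref_time = {ref_time} ps, proof_size = {proof_size} bytes");
    }
}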
impl WeightInfo for () { - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn add_member() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 17_245_000 picoseconds. - Weight::from_parts(17_930_000, 3507) + // Minimum execution time: 15_389_000 picoseconds. + Weight::from_parts(15_901_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:11 w:11) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:11 w:11) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:11 w:11) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:11 w:11) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:11 w:22) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:11 w:22) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `616 + r * (281 Β±0)` // Estimated: `3519 + r * (2529 Β±0)` - // Minimum execution time: 29_534_000 picoseconds. 
- Weight::from_parts(32_847_495, 3519) - // Standard Error: 24_211 - .saturating_add(Weight::from_parts(13_949_639, 0).saturating_mul(r.into())) + // Minimum execution time: 29_541_000 picoseconds. + Weight::from_parts(32_239_358, 3519) + // Standard Error: 23_406 + .saturating_add(Weight::from_parts(16_030_191, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:0 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:0 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:0 w:1) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:0 w:1) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `314 + r * (17 Β±0)` // Estimated: `3507` - // Minimum execution time: 20_333_000 picoseconds. - Weight::from_parts(21_592_224, 3507) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(321_314, 0).saturating_mul(r.into())) + // Minimum execution time: 17_939_000 picoseconds. 
+ Weight::from_parts(19_290_416, 3507) + // Standard Error: 5_710 + .saturating_add(Weight::from_parts(374_399, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: RankedCollective Members (r:1 w:1) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedCollective MemberCount (r:1 w:1) - /// Proof: RankedCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: RankedCollective IdToIndex (r:1 w:1) - /// Proof: RankedCollective IdToIndex (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) - /// Storage: RankedCollective IndexToId (r:1 w:1) - /// Proof: RankedCollective IndexToId (max_values: None, max_size: Some(54), added: 2529, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:1) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::MemberCount` (r:1 w:1) + /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IdToIndex` (r:1 w:2) + /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::IndexToId` (r:1 w:2) + /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `632 + r * (72 Β±0)` // Estimated: `3519` - // Minimum execution time: 29_446_000 picoseconds. - Weight::from_parts(32_447_715, 3519) - // Standard Error: 28_791 - .saturating_add(Weight::from_parts(822_890, 0).saturating_mul(r.into())) + // Minimum execution time: 29_609_000 picoseconds. 
+ Weight::from_parts(32_686_167, 3519) + // Standard Error: 27_588 + .saturating_add(Weight::from_parts(789_212, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:1) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:1 w:1) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:1) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:1 w:1) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 45_474_000 picoseconds. - Weight::from_parts(47_228_000, 219984) + // Minimum execution time: 41_151_000 picoseconds. + Weight::from_parts(42_435_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - /// Proof: RankedPolls ReferendumInfoFor (max_values: None, max_size: Some(330), added: 2805, mode: MaxEncodedLen) - /// Storage: RankedCollective VotingCleanup (r:1 w:0) - /// Proof: RankedCollective VotingCleanup (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: RankedCollective Voting (r:100 w:100) - /// Proof: RankedCollective Voting (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `RankedPolls::ReferendumInfoFor` (r:1 w:0) + /// Proof: `RankedPolls::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(330), added: 2805, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::VotingCleanup` (r:1 w:0) + /// Proof: `RankedCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Voting` (r:100 w:100) + /// Proof: `RankedCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `462 + n * (50 Β±0)` // Estimated: `3795 + n * (2540 Β±0)` - // Minimum execution time: 13_903_000 picoseconds. 
- Weight::from_parts(18_209_102, 3795) - // Standard Error: 2_556 - .saturating_add(Weight::from_parts(1_237_454, 0).saturating_mul(n.into())) + // Minimum execution time: 13_563_000 picoseconds. + Weight::from_parts(17_495_215, 3795) + // Standard Error: 2_294 + .saturating_add(Weight::from_parts(1_207_393, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) } - /// Storage: `RankedCollective::Members` (r:2 w:2) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:2 w:2) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `RankedCollective::IdToIndex` (r:2 w:2) + /// Storage: `RankedCollective::IdToIndex` (r:2 w:4) /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::Member` (r:2 w:2) + /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:0) + /// Proof: `CoreFellowship::MemberEvidence` (`max_values`: None, `max_size`: Some(16429), added: 18904, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:2 w:2) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IndexToId` (r:0 w:2) /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn exchange_member() -> Weight { // Proof Size summary in bytes: - // Measured: `437` - // Estimated: `6048` - // Minimum execution time: 138_000_000 picoseconds. - Weight::from_parts(141_000_000, 0) - .saturating_add(Weight::from_parts(0, 6048)) - .saturating_add(RocksDbWeight::get().reads(6)) - .saturating_add(RocksDbWeight::get().writes(8)) + // Measured: `625` + // Estimated: `19894` + // Minimum execution time: 70_713_000 picoseconds. + Weight::from_parts(72_831_000, 19894) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().writes(14_u64)) } } diff --git a/substrate/frame/recovery/src/weights.rs b/substrate/frame/recovery/src/weights.rs index 84b19ae694e..fe5dbcfc222 100644 --- a/substrate/frame/recovery/src/weights.rs +++ b/substrate/frame/recovery/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_recovery +//! Autogenerated weights for `pallet_recovery` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/recovery/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/recovery/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_recovery. +/// Weight functions needed for `pallet_recovery`. pub trait WeightInfo { fn as_recovered() -> Weight; fn set_recovered() -> Weight; @@ -63,258 +62,266 @@ pub trait WeightInfo { fn cancel_recovered() -> Weight; } -/// Weights for pallet_recovery using the Substrate node and recommended hardware. +/// Weights for `pallet_recovery` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Recovery Proxy (r:1 w:0) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `3545` - // Minimum execution time: 9_360_000 picoseconds. - Weight::from_parts(9_773_000, 3545) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `497` + // Estimated: `3997` + // Minimum execution time: 14_898_000 picoseconds. + Weight::from_parts(15_464_000, 3997) + .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: Recovery Proxy (r:0 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn set_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_146_000 picoseconds. - Weight::from_parts(9_507_000, 0) + // Minimum execution time: 7_424_000 picoseconds. + Weight::from_parts(7_830_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. 
fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 26_472_000 picoseconds. - Weight::from_parts(27_917_651, 3816) - // Standard Error: 7_129 - .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) + // Minimum execution time: 21_968_000 picoseconds. + Weight::from_parts(23_594_232, 3816) + // Standard Error: 5_848 + .saturating_add(Weight::from_parts(24_843, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `272` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 29_618_000 picoseconds. - Weight::from_parts(30_192_000, 3854) + // Minimum execution time: 25_494_000 picoseconds. + Weight::from_parts(26_290_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `360 + n * (64 Β±0)` + // Measured: `431 + n * (64 Β±0)` // Estimated: `3854` - // Minimum execution time: 19_464_000 picoseconds. - Weight::from_parts(20_642_522, 3854) - // Standard Error: 5_974 - .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into())) + // Minimum execution time: 17_044_000 picoseconds. 
+ Weight::from_parts(18_299_617, 3854) + // Standard Error: 5_580 + .saturating_add(Weight::from_parts(187_568, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + n * (64 Β±0)` + // Measured: `463 + n * (64 Β±0)` // Estimated: `3854` - // Minimum execution time: 23_656_000 picoseconds. - Weight::from_parts(24_903_269, 3854) - // Standard Error: 5_771 - .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into())) + // Minimum execution time: 22_186_000 picoseconds. + Weight::from_parts(23_476_807, 3854) + // Standard Error: 6_392 + .saturating_add(Weight::from_parts(89_770, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `513 + n * (32 Β±0)` + // Measured: `584 + n * (32 Β±0)` // Estimated: `3854` - // Minimum execution time: 34_866_000 picoseconds. - Weight::from_parts(36_368_748, 3854) - // Standard Error: 6_600 - .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into())) + // Minimum execution time: 31_045_000 picoseconds. 
+ Weight::from_parts(32_623_578, 3854) + // Standard Error: 7_203 + .saturating_add(Weight::from_parts(61_466, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Recovery ActiveRecoveries (r:1 w:0) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `270 + n * (32 Β±0)` + // Measured: `341 + n * (32 Β±0)` // Estimated: `3854` - // Minimum execution time: 31_405_000 picoseconds. - Weight::from_parts(32_552_838, 3854) - // Standard Error: 8_043 - .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into())) + // Minimum execution time: 27_925_000 picoseconds. + Weight::from_parts(29_258_264, 3854) + // Standard Error: 8_192 + .saturating_add(Weight::from_parts(128_124, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Recovery Proxy (r:1 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` + // Measured: `352` // Estimated: `3545` - // Minimum execution time: 11_530_000 picoseconds. - Weight::from_parts(11_851_000, 3545) + // Minimum execution time: 11_623_000 picoseconds. + Weight::from_parts(12_043_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Recovery Proxy (r:1 w:0) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `281` - // Estimated: `3545` - // Minimum execution time: 9_360_000 picoseconds. - Weight::from_parts(9_773_000, 3545) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `497` + // Estimated: `3997` + // Minimum execution time: 14_898_000 picoseconds. 
+ Weight::from_parts(15_464_000, 3997) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } - /// Storage: Recovery Proxy (r:0 w:1) - /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen) + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn set_recovered() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_146_000 picoseconds. - Weight::from_parts(9_507_000, 0) + // Minimum execution time: 7_424_000 picoseconds. + Weight::from_parts(7_830_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:1) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `175` + // Measured: `246` // Estimated: `3816` - // Minimum execution time: 26_472_000 picoseconds. - Weight::from_parts(27_917_651, 3816) - // Standard Error: 7_129 - .saturating_add(Weight::from_parts(59_239, 0).saturating_mul(n.into())) + // Minimum execution time: 21_968_000 picoseconds. + Weight::from_parts(23_594_232, 3816) + // Standard Error: 5_848 + .saturating_add(Weight::from_parts(24_843, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `272` + // Measured: `343` // Estimated: `3854` - // Minimum execution time: 29_618_000 picoseconds. - Weight::from_parts(30_192_000, 3854) + // Minimum execution time: 25_494_000 picoseconds. + Weight::from_parts(26_290_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Recovery Recoverable (r:1 w:0) - /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen) - /// Storage: Recovery ActiveRecoveries (r:1 w:1) - /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen) + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 9]`. 
fn vouch_recovery(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `360 + n * (64 ±0)`
+ // Measured: `431 + n * (64 ±0)`
 // Estimated: `3854`
- // Minimum execution time: 19_464_000 picoseconds.
- Weight::from_parts(20_642_522, 3854)
- // Standard Error: 5_974
- .saturating_add(Weight::from_parts(142_308, 0).saturating_mul(n.into()))
+ // Minimum execution time: 17_044_000 picoseconds.
+ Weight::from_parts(18_299_617, 3854)
+ // Standard Error: 5_580
+ .saturating_add(Weight::from_parts(187_568, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Recovery Recoverable (r:1 w:0)
- /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen)
- /// Storage: Recovery ActiveRecoveries (r:1 w:0)
- /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen)
- /// Storage: Recovery Proxy (r:1 w:1)
- /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen)
+ /// Storage: `Recovery::Recoverable` (r:1 w:0)
+ /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`)
+ /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0)
+ /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`)
+ /// Storage: `Recovery::Proxy` (r:1 w:1)
+ /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[1, 9]`.
 fn claim_recovery(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `392 + n * (64 ±0)`
+ // Measured: `463 + n * (64 ±0)`
 // Estimated: `3854`
- // Minimum execution time: 23_656_000 picoseconds.
- Weight::from_parts(24_903_269, 3854)
- // Standard Error: 5_771
- .saturating_add(Weight::from_parts(117_343, 0).saturating_mul(n.into()))
+ // Minimum execution time: 22_186_000 picoseconds.
+ Weight::from_parts(23_476_807, 3854)
+ // Standard Error: 6_392
+ .saturating_add(Weight::from_parts(89_770, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Recovery ActiveRecoveries (r:1 w:1)
- /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen)
- /// Storage: System Account (r:1 w:1)
- /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+ /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1)
+ /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[1, 9]`.
 fn close_recovery(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `513 + n * (32 ±0)`
+ // Measured: `584 + n * (32 ±0)`
 // Estimated: `3854`
- // Minimum execution time: 34_866_000 picoseconds.
- Weight::from_parts(36_368_748, 3854)
- // Standard Error: 6_600
- .saturating_add(Weight::from_parts(118_610, 0).saturating_mul(n.into()))
+ // Minimum execution time: 31_045_000 picoseconds.
+ Weight::from_parts(32_623_578, 3854)
+ // Standard Error: 7_203
+ .saturating_add(Weight::from_parts(61_466, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
- /// Storage: Recovery ActiveRecoveries (r:1 w:0)
- /// Proof: Recovery ActiveRecoveries (max_values: None, max_size: Some(389), added: 2864, mode: MaxEncodedLen)
- /// Storage: Recovery Recoverable (r:1 w:1)
- /// Proof: Recovery Recoverable (max_values: None, max_size: Some(351), added: 2826, mode: MaxEncodedLen)
+ /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0)
+ /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`)
+ /// Storage: `Recovery::Recoverable` (r:1 w:1)
+ /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`)
 /// The range of component `n` is `[1, 9]`.
 fn remove_recovery(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `270 + n * (32 ±0)`
+ // Measured: `341 + n * (32 ±0)`
 // Estimated: `3854`
- // Minimum execution time: 31_405_000 picoseconds.
- Weight::from_parts(32_552_838, 3854)
- // Standard Error: 8_043
- .saturating_add(Weight::from_parts(171_605, 0).saturating_mul(n.into()))
+ // Minimum execution time: 27_925_000 picoseconds.
+ Weight::from_parts(29_258_264, 3854)
+ // Standard Error: 8_192
+ .saturating_add(Weight::from_parts(128_124, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
- /// Storage: Recovery Proxy (r:1 w:1)
- /// Proof: Recovery Proxy (max_values: None, max_size: Some(80), added: 2555, mode: MaxEncodedLen)
+ /// Storage: `Recovery::Proxy` (r:1 w:1)
+ /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
 fn cancel_recovered() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `281`
+ // Measured: `352`
 // Estimated: `3545`
- // Minimum execution time: 11_530_000 picoseconds.
- Weight::from_parts(11_851_000, 3545)
+ // Minimum execution time: 11_623_000 picoseconds.
+ Weight::from_parts(12_043_000, 3545)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 }
diff --git a/substrate/frame/referenda/src/weights.rs b/substrate/frame/referenda/src/weights.rs
index 4b89379b311..1f1b69873eb 100644
--- a/substrate/frame/referenda/src/weights.rs
+++ b/substrate/frame/referenda/src/weights.rs
@@ -15,16 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! Autogenerated weights for pallet_referenda
+//! Autogenerated weights for `pallet_referenda`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/referenda/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/referenda/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_referenda. +/// Weight functions needed for `pallet_referenda`. pub trait WeightInfo { fn submit() -> Weight; fn place_decision_deposit_preparing() -> Weight; @@ -84,842 +83,874 @@ pub trait WeightInfo { fn clear_metadata() -> Weight; } -/// Weights for pallet_referenda using the Substrate node and recommended hardware. +/// Weights for `pallet_referenda` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Referenda ReferendumCount (r:1 w:1) - /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:0 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumCount` (r:1 w:1) + /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `220` + // Measured: `286` // Estimated: `110487` - // Minimum execution time: 40_175_000 picoseconds. - Weight::from_parts(41_107_000, 110487) + // Minimum execution time: 31_536_000 picoseconds. 
+ Weight::from_parts(32_797_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 50_922_000 picoseconds. - Weight::from_parts(52_179_000, 219984) + // Minimum execution time: 42_579_000 picoseconds. + Weight::from_parts(43_877_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3260` + // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 69_559_000 picoseconds. - Weight::from_parts(72_143_000, 110487) + // Minimum execution time: 61_439_000 picoseconds. 
+ Weight::from_parts(63_681_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3280` + // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_833_000 picoseconds. - Weight::from_parts(70_987_000, 110487) + // Minimum execution time: 60_136_000 picoseconds. + Weight::from_parts(61_841_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 61_794_000 picoseconds. 
- Weight::from_parts(62_846_000, 219984) + // Minimum execution time: 49_865_000 picoseconds. + Weight::from_parts(51_347_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) - } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_664_000 picoseconds. - Weight::from_parts(60_195_000, 219984) + // Minimum execution time: 47_887_000 picoseconds. + Weight::from_parts(49_927_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `417` // Estimated: `3831` - // Minimum execution time: 30_850_000 picoseconds. - Weight::from_parts(32_130_000, 3831) + // Minimum execution time: 25_721_000 picoseconds. + Weight::from_parts(26_922_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `407` // Estimated: `3831` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(32_196_000, 3831) + // Minimum execution time: 25_840_000 picoseconds. 
+ Weight::from_parts(26_498_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_139_000 picoseconds. - Weight::from_parts(37_252_000, 219984) + // Minimum execution time: 30_530_000 picoseconds. + Weight::from_parts(31_547_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:0) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `622` + // Measured: `688` // Estimated: `219984` - // Minimum execution time: 80_862_000 picoseconds. - Weight::from_parts(83_045_000, 219984) + // Minimum execution time: 64_011_000 picoseconds. 
+ Weight::from_parts(65_240_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:0) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_136_000 picoseconds. - Weight::from_parts(10_638_000, 5477) + // Minimum execution time: 9_568_000 picoseconds. + Weight::from_parts(10_004_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 52_022_000 picoseconds. - Weight::from_parts(53_910_000, 110487) + // Minimum execution time: 43_325_000 picoseconds. 
+ Weight::from_parts(45_335_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 53_683_000 picoseconds. - Weight::from_parts(55_707_000, 110487) + // Minimum execution time: 44_287_000 picoseconds. + Weight::from_parts(45_164_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 24_043_000 picoseconds. - Weight::from_parts(24_512_000, 5477) + // Minimum execution time: 21_909_000 picoseconds. + Weight::from_parts(22_641_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 23_588_000 picoseconds. 
- Weight::from_parts(24_422_000, 5477) + // Minimum execution time: 21_626_000 picoseconds. + Weight::from_parts(22_338_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3015` + // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 31_443_000 picoseconds. - Weight::from_parts(32_725_000, 5477) + // Minimum execution time: 29_245_000 picoseconds. + Weight::from_parts(30_147_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3035` + // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 30_319_000 picoseconds. - Weight::from_parts(31_652_000, 5477) + // Minimum execution time: 28_512_000 picoseconds. 
+ Weight::from_parts(29_781_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `399` // Estimated: `110487` - // Minimum execution time: 23_062_000 picoseconds. - Weight::from_parts(23_614_000, 110487) + // Minimum execution time: 19_208_000 picoseconds. + Weight::from_parts(19_947_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 23_537_000 picoseconds. - Weight::from_parts(24_267_000, 110487) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_783_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `278` + // Measured: `344` // Estimated: `3831` - // Minimum execution time: 16_388_000 picoseconds. - Weight::from_parts(16_676_000, 3831) + // Minimum execution time: 12_947_000 picoseconds. 
+ Weight::from_parts(13_582_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_801_000 picoseconds. - Weight::from_parts(34_053_000, 110487) + // Minimum execution time: 26_699_000 picoseconds. + Weight::from_parts(28_048_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 35_704_000 picoseconds. - Weight::from_parts(36_451_000, 110487) + // Minimum execution time: 28_132_000 picoseconds. 
+ Weight::from_parts(29_520_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_151_000 picoseconds. - Weight::from_parts(30_055_000, 110487) + // Minimum execution time: 23_212_000 picoseconds. + Weight::from_parts(24_200_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_265_000 picoseconds. - Weight::from_parts(30_213_000, 110487) + // Minimum execution time: 22_807_000 picoseconds. + Weight::from_parts(23_858_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 27_760_000 picoseconds. - Weight::from_parts(28_381_000, 110487) + // Minimum execution time: 22_862_000 picoseconds. 
+ Weight::from_parts(23_627_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `110487` - // Minimum execution time: 25_464_000 picoseconds. - Weight::from_parts(26_348_000, 110487) + // Minimum execution time: 21_030_000 picoseconds. + Weight::from_parts(21_710_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `219984` - // Minimum execution time: 42_629_000 picoseconds. - Weight::from_parts(43_732_000, 219984) + // Minimum execution time: 33_031_000 picoseconds. + Weight::from_parts(34_518_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_015_000 picoseconds. 
- Weight::from_parts(30_827_000, 110487) + // Minimum execution time: 23_198_000 picoseconds. + Weight::from_parts(24_238_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:0 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `422` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(20_681_000, 3831) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 19_502_000 picoseconds. + Weight::from_parts(20_105_000, 3831) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `421` // Estimated: `3831` - // Minimum execution time: 17_323_000 picoseconds. - Weight::from_parts(18_227_000, 3831) + // Minimum execution time: 15_417_000 picoseconds. + Weight::from_parts(15_916_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Referenda ReferendumCount (r:1 w:1) - /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:0 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumCount` (r:1 w:1) + /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `220` + // Measured: `286` // Estimated: `110487` - // Minimum execution time: 40_175_000 picoseconds. - Weight::from_parts(41_107_000, 110487) + // Minimum execution time: 31_536_000 picoseconds. + Weight::from_parts(32_797_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 50_922_000 picoseconds. - Weight::from_parts(52_179_000, 219984) + // Minimum execution time: 42_579_000 picoseconds. 
+ Weight::from_parts(43_877_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3260` + // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 69_559_000 picoseconds. - Weight::from_parts(72_143_000, 110487) + // Minimum execution time: 61_439_000 picoseconds. 
+ Weight::from_parts(63_681_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3280` + // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 68_833_000 picoseconds. - Weight::from_parts(70_987_000, 110487) + // Minimum execution time: 60_136_000 picoseconds. + Weight::from_parts(61_841_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 61_794_000 picoseconds. 
- Weight::from_parts(62_846_000, 219984) + // Minimum execution time: 49_865_000 picoseconds. + Weight::from_parts(51_347_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `473` + // Measured: `539` // Estimated: `219984` - // Minimum execution time: 58_664_000 picoseconds. - Weight::from_parts(60_195_000, 219984) + // Minimum execution time: 47_887_000 picoseconds. + Weight::from_parts(49_927_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `417` // Estimated: `3831` - // Minimum execution time: 30_850_000 picoseconds. - Weight::from_parts(32_130_000, 3831) + // Minimum execution time: 25_721_000 picoseconds. + Weight::from_parts(26_922_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `407` // Estimated: `3831` - // Minimum execution time: 30_747_000 picoseconds. - Weight::from_parts(32_196_000, 3831) + // Minimum execution time: 25_840_000 picoseconds. 
+ Weight::from_parts(26_498_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `219984` - // Minimum execution time: 36_139_000 picoseconds. - Weight::from_parts(37_252_000, 219984) + // Minimum execution time: 30_530_000 picoseconds. + Weight::from_parts(31_547_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:0) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `622` + // Measured: `688` // Estimated: `219984` - // Minimum execution time: 80_862_000 picoseconds. - Weight::from_parts(83_045_000, 219984) + // Minimum execution time: 64_011_000 picoseconds. 
+ Weight::from_parts(65_240_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:0) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_136_000 picoseconds. - Weight::from_parts(10_638_000, 5477) + // Minimum execution time: 9_568_000 picoseconds. + Weight::from_parts(10_004_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 52_022_000 picoseconds. - Weight::from_parts(53_910_000, 110487) + // Minimum execution time: 43_325_000 picoseconds. 
+ Weight::from_parts(45_335_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3150` + // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 53_683_000 picoseconds. - Weight::from_parts(55_707_000, 110487) + // Minimum execution time: 44_287_000 picoseconds. + Weight::from_parts(45_164_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 24_043_000 picoseconds. - Weight::from_parts(24_512_000, 5477) + // Minimum execution time: 21_909_000 picoseconds. + Weight::from_parts(22_641_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3011` + // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 23_588_000 picoseconds. 
- Weight::from_parts(24_422_000, 5477) + // Minimum execution time: 21_626_000 picoseconds. + Weight::from_parts(22_338_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3015` + // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 31_443_000 picoseconds. - Weight::from_parts(32_725_000, 5477) + // Minimum execution time: 29_245_000 picoseconds. + Weight::from_parts(30_147_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:0) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Referenda TrackQueue (r:1 w:1) - /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3035` + // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 30_319_000 picoseconds. - Weight::from_parts(31_652_000, 5477) + // Minimum execution time: 28_512_000 picoseconds. 
+ Weight::from_parts(29_781_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `399` // Estimated: `110487` - // Minimum execution time: 23_062_000 picoseconds. - Weight::from_parts(23_614_000, 110487) + // Minimum execution time: 19_208_000 picoseconds. + Weight::from_parts(19_947_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 23_537_000 picoseconds. - Weight::from_parts(24_267_000, 110487) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_783_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `278` + // Measured: `344` // Estimated: `3831` - // Minimum execution time: 16_388_000 picoseconds. - Weight::from_parts(16_676_000, 3831) + // Minimum execution time: 12_947_000 picoseconds. 
+ Weight::from_parts(13_582_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 32_801_000 picoseconds. - Weight::from_parts(34_053_000, 110487) + // Minimum execution time: 26_699_000 picoseconds. + Weight::from_parts(28_048_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda DecidingCount (r:1 w:1) - /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `381` + // Measured: `447` // Estimated: `110487` - // Minimum execution time: 35_704_000 picoseconds. - Weight::from_parts(36_451_000, 110487) + // Minimum execution time: 28_132_000 picoseconds. 
+ Weight::from_parts(29_520_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 29_151_000 picoseconds. - Weight::from_parts(30_055_000, 110487) + // Minimum execution time: 23_212_000 picoseconds. + Weight::from_parts(24_200_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `483` // Estimated: `110487` - // Minimum execution time: 29_265_000 picoseconds. - Weight::from_parts(30_213_000, 110487) + // Minimum execution time: 22_807_000 picoseconds. + Weight::from_parts(23_858_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 27_760_000 picoseconds. - Weight::from_parts(28_381_000, 110487) + // Minimum execution time: 22_862_000 picoseconds. 
+ Weight::from_parts(23_627_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `110487` - // Minimum execution time: 25_464_000 picoseconds. - Weight::from_parts(26_348_000, 110487) + // Minimum execution time: 21_030_000 picoseconds. + Weight::from_parts(21_710_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `504` // Estimated: `219984` - // Minimum execution time: 42_629_000 picoseconds. - Weight::from_parts(43_732_000, 219984) + // Minimum execution time: 33_031_000 picoseconds. + Weight::from_parts(34_518_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `434` + // Measured: `500` // Estimated: `110487` - // Minimum execution time: 30_015_000 picoseconds. 
- Weight::from_parts(30_827_000, 110487) + // Minimum execution time: 23_198_000 picoseconds. + Weight::from_parts(24_238_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:0 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `422` + // Measured: `555` // Estimated: `3831` - // Minimum execution time: 19_901_000 picoseconds. - Weight::from_parts(20_681_000, 3831) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 19_502_000 picoseconds. + Weight::from_parts(20_105_000, 3831) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(366), added: 2841, mode: MaxEncodedLen) - /// Storage: Referenda MetadataOf (r:1 w:1) - /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(366), added: 2841, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `355` + // Measured: `421` // Estimated: `3831` - // Minimum execution time: 17_323_000 picoseconds. - Weight::from_parts(18_227_000, 3831) + // Minimum execution time: 15_417_000 picoseconds. + Weight::from_parts(15_916_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/remark/src/weights.rs b/substrate/frame/remark/src/weights.rs index 46475db163f..61cca5b7d05 100644 --- a/substrate/frame/remark/src/weights.rs +++ b/substrate/frame/remark/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_remark +//! Autogenerated weights for `pallet_remark` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/remark/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/remark/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,12 +49,12 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_remark. +/// Weight functions needed for `pallet_remark`. pub trait WeightInfo { fn store(l: u32, ) -> Weight; } -/// Weights for pallet_remark using the Substrate node and recommended hardware. +/// Weights for `pallet_remark` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[1, 1048576]`. @@ -63,23 +62,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_471_000 picoseconds. - Weight::from_parts(8_586_000, 0) + // Minimum execution time: 6_623_000 picoseconds. + Weight::from_parts(6_757_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_368, 0).saturating_mul(l.into())) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_471_000 picoseconds. - Weight::from_parts(8_586_000, 0) + // Minimum execution time: 6_623_000 picoseconds. + Weight::from_parts(6_757_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_359, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_368, 0).saturating_mul(l.into())) } } diff --git a/substrate/frame/safe-mode/src/weights.rs b/substrate/frame/safe-mode/src/weights.rs index f72bebcab9a..d326fab0f0f 100644 --- a/substrate/frame/safe-mode/src/weights.rs +++ b/substrate/frame/safe-mode/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_safe_mode` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_safe_mode +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_safe_mode -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/safe-mode/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/safe-mode/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -70,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_594_000, 1489) + // Minimum execution time: 2_216_000 picoseconds. + Weight::from_parts(2_309_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -80,23 +82,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_868_000 picoseconds. - Weight::from_parts(9_415_000, 1489) + // Minimum execution time: 6_124_000 picoseconds. + Weight::from_parts(6_601_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3550` - // Minimum execution time: 50_541_000 picoseconds. - Weight::from_parts(51_558_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_825_000 picoseconds. + Weight::from_parts(45_845_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -106,23 +108,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_833_000, 1489) + // Minimum execution time: 7_603_000 picoseconds. 
+ Weight::from_parts(7_954_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3550` - // Minimum execution time: 50_818_000 picoseconds. - Weight::from_parts(51_873_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_716_000 picoseconds. + Weight::from_parts(46_039_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -132,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_843_000 picoseconds. - Weight::from_parts(11_314_000, 1489) + // Minimum execution time: 8_231_000 picoseconds. + Weight::from_parts(8_731_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -143,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_382_000 picoseconds. - Weight::from_parts(10_814_000, 1489) + // Minimum execution time: 8_015_000 picoseconds. + Weight::from_parts(8_247_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -153,39 +155,39 @@ impl WeightInfo for SubstrateWeight { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 42_828_000 picoseconds. - Weight::from_parts(43_752_000, 3550) + // Estimated: `3658` + // Minimum execution time: 39_149_000 picoseconds. + Weight::from_parts(40_005_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 40_196_000 picoseconds. - Weight::from_parts(41_298_000, 3550) + // Estimated: `3658` + // Minimum execution time: 37_528_000 picoseconds. 
+ Weight::from_parts(38_473_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 33_660_000 picoseconds. - Weight::from_parts(34_426_000, 3550) + // Estimated: `3658` + // Minimum execution time: 29_417_000 picoseconds. + Weight::from_parts(30_195_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -199,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_594_000, 1489) + // Minimum execution time: 2_216_000 picoseconds. + Weight::from_parts(2_309_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -209,23 +211,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_868_000 picoseconds. - Weight::from_parts(9_415_000, 1489) + // Minimum execution time: 6_124_000 picoseconds. + Weight::from_parts(6_601_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3550` - // Minimum execution time: 50_541_000 picoseconds. - Weight::from_parts(51_558_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_825_000 picoseconds. + Weight::from_parts(45_845_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -235,23 +237,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 10_489_000 picoseconds. - Weight::from_parts(10_833_000, 1489) + // Minimum execution time: 7_603_000 picoseconds. 
+ Weight::from_parts(7_954_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3550` - // Minimum execution time: 50_818_000 picoseconds. - Weight::from_parts(51_873_000, 3550) + // Estimated: `3658` + // Minimum execution time: 44_716_000 picoseconds. + Weight::from_parts(46_039_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -261,8 +263,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_843_000 picoseconds. - Weight::from_parts(11_314_000, 1489) + // Minimum execution time: 8_231_000 picoseconds. + Weight::from_parts(8_731_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -272,8 +274,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 10_382_000 picoseconds. - Weight::from_parts(10_814_000, 1489) + // Minimum execution time: 8_015_000 picoseconds. + Weight::from_parts(8_247_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -282,39 +284,39 @@ impl WeightInfo for () { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 42_828_000 picoseconds. - Weight::from_parts(43_752_000, 3550) + // Estimated: `3658` + // Minimum execution time: 39_149_000 picoseconds. + Weight::from_parts(40_005_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 40_196_000 picoseconds. - Weight::from_parts(41_298_000, 3550) + // Estimated: `3658` + // Minimum execution time: 37_528_000 picoseconds. 
+ Weight::from_parts(38_473_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3550` - // Minimum execution time: 33_660_000 picoseconds. - Weight::from_parts(34_426_000, 3550) + // Estimated: `3658` + // Minimum execution time: 29_417_000 picoseconds. + Weight::from_parts(30_195_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs index 3d3b9e31595..c4f22a027eb 100644 --- a/substrate/frame/salary/src/weights.rs +++ b/substrate/frame/salary/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_salary +//! Autogenerated weights for `pallet_salary` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/salary/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/salary/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_salary. +/// Weight functions needed for `pallet_salary`. pub trait WeightInfo { fn init() -> Weight; fn bump() -> Weight; @@ -61,204 +60,204 @@ pub trait WeightInfo { fn check_payment() -> Weight; } -/// Weights for pallet_salary using the Substrate node and recommended hardware. +/// Weights for `pallet_salary` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn init() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 10_778_000 picoseconds. - Weight::from_parts(11_084_000, 1541) + // Minimum execution time: 7_421_000 picoseconds. + Weight::from_parts(7_819_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn bump() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_645_000, 1541) + // Minimum execution time: 8_651_000 picoseconds. + Weight::from_parts(9_056_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:0) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:0) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 18_374_000 picoseconds. - Weight::from_parts(19_200_000, 3543) + // Minimum execution time: 16_513_000 picoseconds. 
+ Weight::from_parts(17_305_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 22_696_000 picoseconds. - Weight::from_parts(23_275_000, 3543) + // Minimum execution time: 18_913_000 picoseconds. + Weight::from_parts(19_867_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 63_660_000 picoseconds. - Weight::from_parts(65_006_000, 3543) + // Minimum execution time: 53_297_000 picoseconds. 
+ Weight::from_parts(55_144_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_706_000 picoseconds. - Weight::from_parts(65_763_000, 3593) + // Minimum execution time: 53_638_000 picoseconds. + Weight::from_parts(55_328_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn check_payment() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 11_838_000 picoseconds. - Weight::from_parts(12_323_000, 3543) + // Minimum execution time: 10_557_000 picoseconds. + Weight::from_parts(11_145_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn init() -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 10_778_000 picoseconds. - Weight::from_parts(11_084_000, 1541) + // Minimum execution time: 7_421_000 picoseconds. 
+ Weight::from_parts(7_819_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) fn bump() -> Weight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_645_000, 1541) + // Minimum execution time: 8_651_000 picoseconds. + Weight::from_parts(9_056_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Salary Status (r:1 w:0) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:0) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn induct() -> Weight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 18_374_000 picoseconds. - Weight::from_parts(19_200_000, 3543) + // Minimum execution time: 16_513_000 picoseconds. + Weight::from_parts(17_305_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn register() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 22_696_000 picoseconds. - Weight::from_parts(23_275_000, 3543) + // Minimum execution time: 18_913_000 picoseconds. 
+ Weight::from_parts(19_867_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 63_660_000 picoseconds. - Weight::from_parts(65_006_000, 3543) + // Minimum execution time: 53_297_000 picoseconds. + Weight::from_parts(55_144_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) - /// Storage: RankedCollective Members (r:1 w:0) - /// Proof: RankedCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) + /// Storage: `RankedCollective::Members` (r:1 w:0) + /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3593` - // Minimum execution time: 64_706_000 picoseconds. - Weight::from_parts(65_763_000, 3593) + // Minimum execution time: 53_638_000 picoseconds. 
+ Weight::from_parts(55_328_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Salary Status (r:1 w:1) - /// Proof: Salary Status (max_values: Some(1), max_size: Some(56), added: 551, mode: MaxEncodedLen) - /// Storage: Salary Claimant (r:1 w:1) - /// Proof: Salary Claimant (max_values: None, max_size: Some(78), added: 2553, mode: MaxEncodedLen) + /// Storage: `Salary::Status` (r:1 w:1) + /// Proof: `Salary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `Salary::Claimant` (r:1 w:1) + /// Proof: `Salary::Claimant` (`max_values`: None, `max_size`: Some(78), added: 2553, mode: `MaxEncodedLen`) fn check_payment() -> Weight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 11_838_000 picoseconds. - Weight::from_parts(12_323_000, 3543) + // Minimum execution time: 10_557_000 picoseconds. + Weight::from_parts(11_145_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index 5e5909fcb0d..4c6efaa63a9 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -34,7 +34,8 @@ use sp_core::{ H256, U256, }; use sp_runtime::{ - testing::{Digest, DigestItem, Header, TestXt}, + generic::UncheckedExtrinsic, + testing::{Digest, DigestItem, Header}, BuildStorage, }; @@ -53,7 +54,7 @@ where RuntimeCall: From, { type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; } impl pallet_sassafras::Config for Test { diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs index 9b7e5405a1b..6c98145d266 100644 --- a/substrate/frame/scheduler/src/weights.rs +++ b/substrate/frame/scheduler/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_scheduler` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_scheduler -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/scheduler/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -77,8 +79,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_040_000 picoseconds. 
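All of the regenerated entries in these weight files follow the same shape: a benchmarked base built with `Weight::from_parts(<ref_time in picoseconds>, <proof size in bytes>)`, plus one DB charge per access listed in the `/// Storage:` annotations. The following sketch is illustrative only (it is not generated code and not part of this patch); it simply reuses the numbers from the `Salary::payout` entry of the unit implementation above.

use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// Illustration: how one of the generated entries is assembled. The base part carries
/// the measured execution time and PoV size; the DB part adds the 3 reads and 2 writes
/// from the storage annotations of `payout`.
fn example_payout_weight() -> Weight {
    Weight::from_parts(55_144_000, 3543)
        .saturating_add(RocksDbWeight::get().reads(3_u64))
        .saturating_add(RocksDbWeight::get().writes(2_u64))
}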
- Weight::from_parts(3_202_000, 1489) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_372_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -89,10 +91,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 3_462_000 picoseconds. - Weight::from_parts(6_262_125, 110487) - // Standard Error: 536 - .saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into())) + // Minimum execution time: 3_560_000 picoseconds. + Weight::from_parts(6_356_795, 110487) + // Standard Error: 493 + .saturating_add(Weight::from_parts(315_098, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -100,8 +102,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_425_000 picoseconds. - Weight::from_parts(3_680_000, 0) + // Minimum execution time: 3_501_000 picoseconds. + Weight::from_parts(3_722_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) @@ -114,10 +116,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246 + s * (1 Β±0)` // Estimated: `3711 + s * (1 Β±0)` - // Minimum execution time: 17_564_000 picoseconds. - Weight::from_parts(17_887_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into())) + // Minimum execution time: 17_976_000 picoseconds. + Weight::from_parts(18_137_000, 3711) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -128,16 +130,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_934_000 picoseconds. - Weight::from_parts(5_275_000, 0) + // Minimum execution time: 4_935_000 picoseconds. + Weight::from_parts(5_133_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_348_000 picoseconds. - Weight::from_parts(3_561_000, 0) + // Minimum execution time: 3_467_000 picoseconds. + Weight::from_parts(3_654_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -147,16 +149,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_395_000 picoseconds. - Weight::from_parts(6_642_000, 3997) + // Minimum execution time: 6_528_000 picoseconds. + Weight::from_parts(6_820_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_167_000 picoseconds. - Weight::from_parts(2_266_000, 0) + // Minimum execution time: 2_202_000 picoseconds. 
+ Weight::from_parts(2_360_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -165,15 +167,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 10_009_000 picoseconds. - Weight::from_parts(13_565_985, 110487) - // Standard Error: 575 - .saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into())) + // Minimum execution time: 10_222_000 picoseconds. + Weight::from_parts(13_654_958, 110487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(338_633, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. @@ -181,12 +185,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 14_048_000 picoseconds. - Weight::from_parts(15_141_696, 110487) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into())) + // Minimum execution time: 15_517_000 picoseconds. + Weight::from_parts(17_464_075, 110487) + // Standard Error: 952 + .saturating_add(Weight::from_parts(495_806, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -197,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596 + s * (178 Β±0)` // Estimated: `110487` - // Minimum execution time: 12_902_000 picoseconds. - Weight::from_parts(18_957_156, 110487) - // Standard Error: 792 - .saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into())) + // Minimum execution time: 13_091_000 picoseconds. + Weight::from_parts(19_101_313, 110487) + // Standard Error: 662 + .saturating_add(Weight::from_parts(342_468, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -208,35 +212,35 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `709 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 15_933_000 picoseconds. 
- Weight::from_parts(18_091_415, 110487) - // Standard Error: 779 - .saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into())) + // Minimum execution time: 17_579_000 picoseconds. + Weight::from_parts(20_561_921, 110487) + // Standard Error: 792 + .saturating_add(Weight::from_parts(500_463, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `159` + // Measured: `118` // Estimated: `110487` - // Minimum execution time: 14_155_000 picoseconds. - Weight::from_parts(16_447_031, 110487) - // Standard Error: 233 - .saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 8_996_000 picoseconds. + Weight::from_parts(11_393_234, 110487) + // Standard Error: 190 + .saturating_add(Weight::from_parts(6_714, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -244,10 +248,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 Β±0)` + // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 121_505_000 picoseconds. + Weight::from_parts(124_306_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -259,10 +263,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 Β±0)` + // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 128_070_000 picoseconds. 
+ Weight::from_parts(132_683_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -272,10 +276,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 Β±0)` + // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 118_260_000 picoseconds. + Weight::from_parts(119_722_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,10 +291,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 Β±0)` + // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 129_036_000 picoseconds. + Weight::from_parts(133_975_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -304,8 +308,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_040_000 picoseconds. - Weight::from_parts(3_202_000, 1489) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_372_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -316,10 +320,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 3_462_000 picoseconds. - Weight::from_parts(6_262_125, 110487) - // Standard Error: 536 - .saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into())) + // Minimum execution time: 3_560_000 picoseconds. + Weight::from_parts(6_356_795, 110487) + // Standard Error: 493 + .saturating_add(Weight::from_parts(315_098, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -327,8 +331,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_425_000 picoseconds. - Weight::from_parts(3_680_000, 0) + // Minimum execution time: 3_501_000 picoseconds. + Weight::from_parts(3_722_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) @@ -341,10 +345,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246 + s * (1 Β±0)` // Estimated: `3711 + s * (1 Β±0)` - // Minimum execution time: 17_564_000 picoseconds. - Weight::from_parts(17_887_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into())) + // Minimum execution time: 17_976_000 picoseconds. 
+ Weight::from_parts(18_137_000, 3711) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -355,16 +359,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_934_000 picoseconds. - Weight::from_parts(5_275_000, 0) + // Minimum execution time: 4_935_000 picoseconds. + Weight::from_parts(5_133_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_348_000 picoseconds. - Weight::from_parts(3_561_000, 0) + // Minimum execution time: 3_467_000 picoseconds. + Weight::from_parts(3_654_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -374,16 +378,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_395_000 picoseconds. - Weight::from_parts(6_642_000, 3997) + // Minimum execution time: 6_528_000 picoseconds. + Weight::from_parts(6_820_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_167_000 picoseconds. - Weight::from_parts(2_266_000, 0) + // Minimum execution time: 2_202_000 picoseconds. + Weight::from_parts(2_360_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -392,15 +396,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 10_009_000 picoseconds. - Weight::from_parts(13_565_985, 110487) - // Standard Error: 575 - .saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into())) + // Minimum execution time: 10_222_000 picoseconds. + Weight::from_parts(13_654_958, 110487) + // Standard Error: 676 + .saturating_add(Weight::from_parts(338_633, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. @@ -408,12 +414,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 14_048_000 picoseconds. - Weight::from_parts(15_141_696, 110487) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into())) + // Minimum execution time: 15_517_000 picoseconds. 
+ Weight::from_parts(17_464_075, 110487) + // Standard Error: 952 + .saturating_add(Weight::from_parts(495_806, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -424,10 +430,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596 + s * (178 Β±0)` // Estimated: `110487` - // Minimum execution time: 12_902_000 picoseconds. - Weight::from_parts(18_957_156, 110487) - // Standard Error: 792 - .saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into())) + // Minimum execution time: 13_091_000 picoseconds. + Weight::from_parts(19_101_313, 110487) + // Standard Error: 662 + .saturating_add(Weight::from_parts(342_468, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -435,35 +441,35 @@ impl WeightInfo for () { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `709 + s * (177 Β±0)` // Estimated: `110487` - // Minimum execution time: 15_933_000 picoseconds. - Weight::from_parts(18_091_415, 110487) - // Standard Error: 779 - .saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into())) + // Minimum execution time: 17_579_000 picoseconds. + Weight::from_parts(20_561_921, 110487) + // Standard Error: 792 + .saturating_add(Weight::from_parts(500_463, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `159` + // Measured: `118` // Estimated: `110487` - // Minimum execution time: 14_155_000 picoseconds. - Weight::from_parts(16_447_031, 110487) - // Standard Error: 233 - .saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 8_996_000 picoseconds. 
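The retry-related entries above change shape as well as value: `Scheduler::Retries` is now only written when a task is cancelled or retried (r:0 w:1), instead of being read once and written twice, so `cancel`/`cancel_named` gain one write (2 -> 3) and `schedule_retry` drops from reads(2)/writes(4) to reads(1)/writes(2). A small illustrative sketch (not generated code) of the resulting difference in DB charges for `schedule_retry`, ignoring the re-measured base part:

use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

/// Illustration: DB-charge difference between the old (reads 2, writes 4) and new
/// (reads 1, writes 2) `schedule_retry` annotations.
fn schedule_retry_db_delta() -> Weight {
    let old = RocksDbWeight::get().reads(2_u64).saturating_add(RocksDbWeight::get().writes(4_u64));
    let new = RocksDbWeight::get().reads(1_u64).saturating_add(RocksDbWeight::get().writes(2_u64));
    old.saturating_sub(new)
}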
+ Weight::from_parts(11_393_234, 110487) + // Standard Error: 190 + .saturating_add(Weight::from_parts(6_714, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -471,10 +477,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 Β±0)` + // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 121_505_000 picoseconds. + Weight::from_parts(124_306_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -486,10 +492,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 Β±0)` + // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 128_070_000 picoseconds. + Weight::from_parts(132_683_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -499,10 +505,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `81 + s * (177 Β±0)` + // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 8_130_000 picoseconds. - Weight::from_parts(9_047_554, 110487) + // Minimum execution time: 118_260_000 picoseconds. + Weight::from_parts(119_722_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -514,10 +520,10 @@ impl WeightInfo for () { /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `647 + s * (178 Β±0)` + // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 10_838_000 picoseconds. - Weight::from_parts(12_804_076, 110487) + // Minimum execution time: 129_036_000 picoseconds. + Weight::from_parts(133_975_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/session/src/weights.rs b/substrate/frame/session/src/weights.rs index dd9848fd2c1..09eb665ff3d 100644 --- a/substrate/frame/session/src/weights.rs +++ b/substrate/frame/session/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_session +//! Autogenerated weights for `pallet_session` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/session/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/session/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,77 +49,77 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_session. +/// Weight functions needed for `pallet_session`. pub trait WeightInfo { fn set_keys() -> Weight; fn purge_keys() -> Weight; } -/// Weights for pallet_session using the Substrate node and recommended hardware. +/// Weights for `pallet_session` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:4 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:6 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1924` - // Estimated: `12814` - // Minimum execution time: 55_459_000 picoseconds. - Weight::from_parts(56_180_000, 12814) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 57_921_000 picoseconds. 
+ Weight::from_parts(58_960_000, 17759) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:0 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1791` - // Estimated: `5256` - // Minimum execution time: 40_194_000 picoseconds. - Weight::from_parts(41_313_000, 5256) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 40_983_000 picoseconds. + Weight::from_parts(42_700_000, 5282) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:4 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:6 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1924` - // Estimated: `12814` - // Minimum execution time: 55_459_000 picoseconds. - Weight::from_parts(56_180_000, 12814) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `1919` + // Estimated: `17759` + // Minimum execution time: 57_921_000 picoseconds. 
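The new read/write counts for `set_keys` follow directly from the updated annotations, which now touch six `Session::KeyOwner` entries instead of four. A worked sketch of that arithmetic (illustrative only):

/// Illustration: reads = Staking::Ledger (1) + Session::NextKeys (1) + Session::KeyOwner (6) = 8,
/// writes = 0 + 1 + 6 = 7, matching the `reads(8_u64)` / `writes(7_u64)` charges above.
fn set_keys_db_ops() -> (u64, u64) {
    let reads: u64 = 1 + 1 + 6;
    let writes: u64 = 0 + 1 + 6;
    (reads, writes)
}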
+ Weight::from_parts(58_960_000, 17759) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } - /// Storage: Staking Ledger (r:1 w:0) - /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) - /// Storage: Session NextKeys (r:1 w:1) - /// Proof Skipped: Session NextKeys (max_values: None, max_size: None, mode: Measured) - /// Storage: Session KeyOwner (r:0 w:4) - /// Proof Skipped: Session KeyOwner (max_values: None, max_size: None, mode: Measured) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1791` - // Estimated: `5256` - // Minimum execution time: 40_194_000 picoseconds. - Weight::from_parts(41_313_000, 5256) + // Measured: `1817` + // Estimated: `5282` + // Minimum execution time: 40_983_000 picoseconds. + Weight::from_parts(42_700_000, 5282) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } } diff --git a/substrate/frame/society/src/weights.rs b/substrate/frame/society/src/weights.rs index c32c2383ac9..1245f762972 100644 --- a/substrate/frame/society/src/weights.rs +++ b/substrate/frame/society/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,35 +15,41 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_society +//! Autogenerated weights for `pallet_society` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 // --pallet=pallet_society +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs -// --header=./HEADER-APACHE2 -// --output=./frame/society/src/weights.rs +// --heap-pages=4096 +// --output=./substrate/frame/society/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_society. +/// Weight functions needed for `pallet_society`. pub trait WeightInfo { fn bid() -> Weight; fn unbid() -> Weight; @@ -67,309 +73,739 @@ pub trait WeightInfo { fn cleanup_challenge() -> Weight; } -/// Weights for pallet_society using the Substrate node and recommended hardware. +/// Weights for `pallet_society` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn bid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3909` + // Minimum execution time: 29_905_000 picoseconds. + Weight::from_parts(31_031_000, 3909) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn unbid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:2 w:1) - // Storage: Society SuspendedMembers (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `1946` + // Minimum execution time: 23_038_000 picoseconds. 
+ Weight::from_parts(23_904_000, 1946) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:2 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vouch() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Members (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `6421` + // Minimum execution time: 21_197_000 picoseconds. + Weight::from_parts(22_043_000, 6421) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn unvouch() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society Votes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `535` + // Estimated: `4000` + // Minimum execution time: 14_402_000 picoseconds. + Weight::from_parts(15_171_000, 4000) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:1) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vote() -> Weight { - Weight::zero() - } - // Storage: Society Defending (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `569` + // Estimated: `4034` + // Minimum execution time: 21_930_000 picoseconds. 
+ Weight::from_parts(22_666_000, 4034) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Defending` (r:1 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn defender_vote() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:0) - // Storage: Society Payouts (r:1 w:1) - // Storage: System Account (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `561` + // Estimated: `4026` + // Minimum execution time: 18_821_000 picoseconds. + Weight::from_parts(19_633_000, 4026) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:1) - // Storage: Society Payouts (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 47_262_000 picoseconds. + Weight::from_parts(48_313_000, 4115) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) fn waive_repay() -> Weight { - Weight::zero() - } - // Storage: Society Head (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Founder (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Members (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `547` + // Estimated: `4012` + // Minimum execution time: 18_189_000 picoseconds. 
+ Weight::from_parts(19_038_000, 4012) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Head` (r:1 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Founder` (r:0 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn found_society() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society Head (r:0 w:1) - // Storage: Society Defending (r:0 w:1) - // Storage: Society ChallengeRoundCount (r:0 w:1) - // Storage: Society MemberByIndex (r:0 w:5) - // Storage: Society Skeptic (r:0 w:1) - // Storage: Society Candidates (r:0 w:4) - // Storage: Society Pot (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Votes (r:0 w:4) - // Storage: Society Members (r:0 w:5) - // Storage: Society RoundCount (r:0 w:1) - // Storage: Society Bids (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) - // Storage: Society NextHead (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `1665` + // Minimum execution time: 14_815_000 picoseconds. 
+ Weight::from_parts(15_426_000, 1665) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Society::Founder` (r:1 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:5 w:5) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:5 w:5) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:4 w:4) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:4 w:4) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Head` (r:0 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Defending` (r:0 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:0 w:1) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:0 w:1) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:0 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:0 w:1) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Bids` (r:0 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:0 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn dissolve() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:1) - // Storage: Society Payouts (r:1 w:0) - // Storage: Society Pot (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `1654` + // Estimated: `15019` + // Minimum execution time: 57_787_000 picoseconds. 
+ Weight::from_parts(59_489_000, 15019) + .saturating_add(T::DbWeight::get().reads(20_u64)) + .saturating_add(T::DbWeight::get().writes(30_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:1) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:0) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:1 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn judge_suspended_member() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society MemberCount (r:1 w:0) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `505` + // Estimated: `3970` + // Minimum execution time: 19_262_000 picoseconds. + Weight::from_parts(19_752_000, 3970) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:0) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_parameters() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Skeptic (r:1 w:0) - // Storage: Society Votes (r:1 w:0) - // Storage: Society Members (r:1 w:1) - // Storage: Society Parameters (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `1872` + // Minimum execution time: 10_656_000 picoseconds. + Weight::from_parts(11_063_000, 1872) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:1 w:0) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:0) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn punish_skeptic() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `636` + // Estimated: `4101` + // Minimum execution time: 22_837_000 picoseconds. 
+ Weight::from_parts(23_738_000, 4101) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `632` + // Estimated: `4097` + // Minimum execution time: 35_142_000 picoseconds. 
+ Weight::from_parts(36_811_000, 4097) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn bestow_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 37_133_000 picoseconds. + Weight::from_parts(38_366_000, 4115) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn kick_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `6196` + // Minimum execution time: 37_033_000 picoseconds. 
+ Weight::from_parts(38_293_000, 6196) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn resign_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `746` + // Estimated: `6196` + // Minimum execution time: 35_203_000 picoseconds. + Weight::from_parts(36_252_000, 6196) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:0) - // Storage: Society VoteClearCursor (r:1 w:0) - // Storage: Society Votes (r:0 w:2) + // Proof Size summary in bytes: + // Measured: `758` + // Estimated: `6196` + // Minimum execution time: 35_518_000 picoseconds. + Weight::from_parts(36_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::VoteClearCursor` (r:1 w:0) + /// Proof: `Society::VoteClearCursor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:2 w:2) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `552` + // Estimated: `6492` + // Minimum execution time: 17_001_000 picoseconds. + Weight::from_parts(17_845_000, 6492) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_challenge() -> Weight { - Weight::zero() + // Proof Size summary in bytes: + // Measured: `510` + // Estimated: `3975` + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(12_134_000, 3975) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn bid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3909` + // Minimum execution time: 29_905_000 picoseconds. + Weight::from_parts(31_031_000, 3909) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn unbid() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Candidates (r:1 w:0) - // Storage: Society Members (r:2 w:1) - // Storage: Society SuspendedMembers (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `1946` + // Minimum execution time: 23_038_000 picoseconds. + Weight::from_parts(23_904_000, 1946) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:2 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:0) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vouch() -> Weight { - Weight::zero() - } - // Storage: Society Bids (r:1 w:1) - // Storage: Society Members (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `481` + // Estimated: `6421` + // Minimum execution time: 21_197_000 picoseconds. + Weight::from_parts(22_043_000, 6421) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Bids` (r:1 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn unvouch() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society Votes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `535` + // Estimated: `4000` + // Minimum execution time: 14_402_000 picoseconds. 
+ Weight::from_parts(15_171_000, 4000) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:1) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn vote() -> Weight { - Weight::zero() - } - // Storage: Society Defending (r:1 w:1) - // Storage: Society Members (r:1 w:0) - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `569` + // Estimated: `4034` + // Minimum execution time: 21_930_000 picoseconds. + Weight::from_parts(22_666_000, 4034) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Defending` (r:1 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn defender_vote() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:0) - // Storage: Society Payouts (r:1 w:1) - // Storage: System Account (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `561` + // Estimated: `4026` + // Minimum execution time: 18_821_000 picoseconds. + Weight::from_parts(19_633_000, 4026) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:0) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { - Weight::zero() - } - // Storage: Society Members (r:1 w:1) - // Storage: Society Payouts (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 47_262_000 picoseconds. 
+ Weight::from_parts(48_313_000, 4115) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:1) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) fn waive_repay() -> Weight { - Weight::zero() - } - // Storage: Society Head (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Founder (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Members (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `547` + // Estimated: `4012` + // Minimum execution time: 18_189_000 picoseconds. + Weight::from_parts(19_038_000, 4012) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Head` (r:1 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Founder` (r:0 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn found_society() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:1) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society Head (r:0 w:1) - // Storage: Society Defending (r:0 w:1) - // Storage: Society ChallengeRoundCount (r:0 w:1) - // Storage: Society MemberByIndex (r:0 w:5) - // Storage: Society Skeptic (r:0 w:1) - // Storage: Society Candidates (r:0 w:4) - // Storage: Society Pot (r:0 w:1) - // Storage: Society Rules (r:0 w:1) - // Storage: Society Votes (r:0 w:4) - // Storage: Society Members (r:0 w:5) - // Storage: Society RoundCount (r:0 w:1) - // Storage: Society Bids (r:0 w:1) - // Storage: Society Parameters (r:0 w:1) - // Storage: Society NextHead (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `1665` + // Minimum execution time: 14_815_000 picoseconds. 
+ Weight::from_parts(15_426_000, 1665) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `Society::Founder` (r:1 w:1) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:5 w:5) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberByIndex` (r:5 w:5) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:4 w:4) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:4 w:4) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Head` (r:0 w:1) + /// Proof: `Society::Head` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Defending` (r:0 w:1) + /// Proof: `Society::Defending` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::ChallengeRoundCount` (r:0 w:1) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:0 w:1) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:0 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Rules` (r:0 w:1) + /// Proof: `Society::Rules` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:0 w:1) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Bids` (r:0 w:1) + /// Proof: `Society::Bids` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:0 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn dissolve() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society SuspendedMembers (r:1 w:1) - // Storage: Society Payouts (r:1 w:0) - // Storage: Society Pot (r:1 w:1) + // Proof Size summary in bytes: + // Measured: `1654` + // Estimated: `15019` + // Minimum execution time: 57_787_000 picoseconds. 
+ Weight::from_parts(59_489_000, 15019) + .saturating_add(RocksDbWeight::get().reads(20_u64)) + .saturating_add(RocksDbWeight::get().writes(30_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::SuspendedMembers` (r:1 w:1) + /// Proof: `Society::SuspendedMembers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Payouts` (r:1 w:0) + /// Proof: `Society::Payouts` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Pot` (r:1 w:1) + /// Proof: `Society::Pot` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn judge_suspended_member() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society MemberCount (r:1 w:0) - // Storage: Society Parameters (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `505` + // Estimated: `3970` + // Minimum execution time: 19_262_000 picoseconds. + Weight::from_parts(19_752_000, 3970) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:0) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:0 w:1) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_parameters() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Skeptic (r:1 w:0) - // Storage: Society Votes (r:1 w:0) - // Storage: Society Members (r:1 w:1) - // Storage: Society Parameters (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `387` + // Estimated: `1872` + // Minimum execution time: 10_656_000 picoseconds. + Weight::from_parts(11_063_000, 1872) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Skeptic` (r:1 w:0) + /// Proof: `Society::Skeptic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:1 w:0) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:1 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn punish_skeptic() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `636` + // Estimated: `4101` + // Minimum execution time: 22_837_000 picoseconds. 
+ Weight::from_parts(23_738_000, 4101) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) - // Storage: Society Parameters (r:1 w:0) - // Storage: Society MemberCount (r:1 w:1) - // Storage: Society NextHead (r:1 w:1) - // Storage: System Account (r:1 w:1) - // Storage: Society MemberByIndex (r:0 w:1) - // Storage: Society Members (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `632` + // Estimated: `4097` + // Minimum execution time: 35_142_000 picoseconds. 
+ Weight::from_parts(36_811_000, 4097) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Parameters` (r:1 w:0) + /// Proof: `Society::Parameters` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::MemberCount` (r:1 w:1) + /// Proof: `Society::MemberCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::NextHead` (r:1 w:1) + /// Proof: `Society::NextHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Society::MemberByIndex` (r:0 w:1) + /// Proof: `Society::MemberByIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Members` (r:0 w:1) + /// Proof: `Society::Members` (`max_values`: None, `max_size`: None, mode: `Measured`) fn bestow_membership() -> Weight { - Weight::zero() - } - // Storage: Society Founder (r:1 w:0) - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `650` + // Estimated: `4115` + // Minimum execution time: 37_133_000 picoseconds. + Weight::from_parts(38_366_000, 4115) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Society::Founder` (r:1 w:0) + /// Proof: `Society::Founder` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn kick_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `6196` + // Minimum execution time: 37_033_000 picoseconds. 
+ Weight::from_parts(38_293_000, 6196) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn resign_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:1) - // Storage: Society RoundCount (r:1 w:0) + // Proof Size summary in bytes: + // Measured: `746` + // Estimated: `6196` + // Minimum execution time: 35_203_000 picoseconds. + Weight::from_parts(36_252_000, 6196) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:1) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::RoundCount` (r:1 w:0) + /// Proof: `Society::RoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_candidate() -> Weight { - Weight::zero() - } - // Storage: Society Candidates (r:1 w:0) - // Storage: Society VoteClearCursor (r:1 w:0) - // Storage: Society Votes (r:0 w:2) + // Proof Size summary in bytes: + // Measured: `758` + // Estimated: `6196` + // Minimum execution time: 35_518_000 picoseconds. + Weight::from_parts(36_508_000, 6196) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Society::Candidates` (r:1 w:0) + /// Proof: `Society::Candidates` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::VoteClearCursor` (r:1 w:0) + /// Proof: `Society::VoteClearCursor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Society::Votes` (r:2 w:2) + /// Proof: `Society::Votes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_candidacy() -> Weight { - Weight::zero() - } - // Storage: Society ChallengeRoundCount (r:1 w:0) - // Storage: Society DefenderVotes (r:0 w:1) + // Proof Size summary in bytes: + // Measured: `552` + // Estimated: `6492` + // Minimum execution time: 17_001_000 picoseconds. + Weight::from_parts(17_845_000, 6492) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Society::ChallengeRoundCount` (r:1 w:0) + /// Proof: `Society::ChallengeRoundCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Society::DefenderVotes` (r:1 w:1) + /// Proof: `Society::DefenderVotes` (`max_values`: None, `max_size`: None, mode: `Measured`) fn cleanup_challenge() -> Weight { - Weight::zero() + // Proof Size summary in bytes: + // Measured: `510` + // Estimated: `3975` + // Minimum execution time: 11_583_000 picoseconds. 
+ Weight::from_parts(12_134_000, 3975)
+ .saturating_add(RocksDbWeight::get().reads(2_u64))
+ .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 }
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 52db7c34bfd..549e177491d 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -249,8 +249,8 @@ pub mod runtime {
 /// The block type, which should be fed into [`frame_system::Config`].
 ///
- /// Should be parameterized with `T: frame_system::Config` and a tuple of `SignedExtension`.
- /// When in doubt, use [`SystemSignedExtensionsOf`].
+ /// Should be parameterized with `T: frame_system::Config` and a tuple of
+ /// `TransactionExtension`. When in doubt, use [`SystemTransactionExtensionsOf`].
 // Note that this cannot be dependent on `T` for block-number because it would lead to a
 // circular dependency (self-referential generics).
 pub type BlockOf = generic::Block>;
@@ -264,7 +264,7 @@ pub mod runtime {
 /// Default set of signed extensions exposed from the `frame_system`.
 ///
 /// crucially, this does NOT contain any tx-payment extension.
- pub type SystemSignedExtensionsOf<T> = (
+ pub type SystemTransactionExtensionsOf<T> = (
 frame_system::CheckNonZeroSender<T>,
 frame_system::CheckSpecVersion<T>,
 frame_system::CheckTxVersion<T>,
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 6f729e08ba5..8288591a787 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -17,26 +17,28 @@
 //! Autogenerated weights for `pallet_staking`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-q7z7ruxr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 // Executed Command:
-// target/production/substrate-node
+// ./target/production/substrate-node
 // benchmark
 // pallet
+// --chain=dev
 // --steps=50
 // --repeat=20
+// --pallet=pallet_staking
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_staking
-// --chain=dev
-// --header=./substrate/HEADER-APACHE2
 // --output=./substrate/frame/staking/src/weights.rs
+// --header=./substrate/HEADER-APACHE2
 // --template=./substrate/.maintain/frame-weight-template.hbs
 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -99,8 +101,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 // Proof Size summary in bytes:
 // Measured: `927`
 // Estimated: `4764`
- // Minimum execution time: 42_042_000 picoseconds.
- Weight::from_parts(43_292_000, 4764)
+ // Minimum execution time: 41_318_000 picoseconds.
+ Weight::from_parts(43_268_000, 4764)
 .saturating_add(T::DbWeight::get().reads(3_u64))
 .saturating_add(T::DbWeight::get().writes(4_u64))
 }
@@ -120,8 +122,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 // Proof Size summary in bytes:
 // Measured: `1990`
 // Estimated: `8877`
- // Minimum execution time: 85_050_000 picoseconds.
- Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 85_666_000 picoseconds. + Weight::from_parts(88_749_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -147,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 90_282_000 picoseconds. + Weight::from_parts(92_332_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -162,16 +164,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Minimum execution time: 44_626_000 picoseconds. + Weight::from_parts(47_254_657, 4764) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(57_657, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) @@ -207,10 +211,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 86_769_000 picoseconds. + Weight::from_parts(95_212_867, 6248) + // Standard Error: 3_706 + .saturating_add(Weight::from_parts(1_320_752, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -242,8 +246,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 50_410_000 picoseconds. + Weight::from_parts(52_576_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -256,10 +260,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1280 + k * (569 Β±0)` // Estimated: `4556 + k * (3033 Β±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) + // Minimum execution time: 29_017_000 picoseconds. 
+ Weight::from_parts(33_019_206, 4556) + // Standard Error: 6_368 + .saturating_add(Weight::from_parts(6_158_681, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -292,10 +296,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 Β±0)` // Estimated: `6248 + n * (2520 Β±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 63_452_000 picoseconds. + Weight::from_parts(60_924_066, 6248) + // Standard Error: 14_416 + .saturating_add(Weight::from_parts(3_921_589, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -319,8 +323,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) + // Minimum execution time: 54_253_000 picoseconds. + Weight::from_parts(55_286_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -334,8 +338,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 16_812_000 picoseconds. + Weight::from_parts(17_380_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -349,8 +353,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 20_590_000 picoseconds. + Weight::from_parts(21_096_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -362,8 +366,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) + // Minimum execution time: 19_923_000 picoseconds. + Weight::from_parts(20_880_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -373,8 +377,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_599_000 picoseconds. + Weight::from_parts(2_751_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -383,8 +387,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 6_941_000 picoseconds. 
+ Weight::from_parts(7_288_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -393,8 +397,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 6_757_000 picoseconds. + Weight::from_parts(7_200_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -403,8 +407,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 7_028_000 picoseconds. + Weight::from_parts(7_366_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -414,10 +418,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_741_000 picoseconds. + Weight::from_parts(3_212_598, 0) + // Standard Error: 68 + .saturating_add(Weight::from_parts(11_695, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:5900 w:11800) @@ -431,10 +435,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1356 + i * (151 Β±0)` // Estimated: `990 + i * (3566 Β±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_289_000, 990) + // Standard Error: 34_227 + .saturating_add(Weight::from_parts(17_006_583, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) @@ -472,10 +476,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 85_308_000 picoseconds. + Weight::from_parts(93_689_468, 6248) + // Standard Error: 5_425 + .saturating_add(Weight::from_parts(1_307_604, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -488,10 +492,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 103_242_000 picoseconds. 
+ Weight::from_parts(1_162_296_080, 70137) + // Standard Error: 76_741 + .saturating_add(Weight::from_parts(6_487_522, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -528,10 +532,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 Β±0)` // Estimated: `30944 + n * (3774 Β±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 140_740_000 picoseconds. + Weight::from_parts(182_886_963, 30944) + // Standard Error: 39_852 + .saturating_add(Weight::from_parts(43_140_752, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -555,10 +559,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 Β±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 80_372_000 picoseconds. + Weight::from_parts(83_335_027, 8877) + // Standard Error: 4_780 + .saturating_add(Weight::from_parts(72_180, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -593,10 +597,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 93_920_000 picoseconds. + Weight::from_parts(98_022_911, 6248) + // Standard Error: 4_096 + .saturating_add(Weight::from_parts(1_287_745, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -642,12 +646,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 Β±0) + v * (3598 Β±0)` // Estimated: `512390 + n * (3566 Β±0) + v * (3566 Β±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 556_012_000 picoseconds. 
+ Weight::from_parts(560_339_000, 512390) + // Standard Error: 2_115_076 + .saturating_add(Weight::from_parts(69_456_497, 0).saturating_mul(v.into())) + // Standard Error: 210_755 + .saturating_add(Weight::from_parts(17_974_873, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -678,12 +682,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 Β±0) + v * (395 Β±0)` // Estimated: `512390 + n * (3566 Β±0) + v * (3566 Β±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 32_792_819_000 picoseconds. + Weight::from_parts(32_986_606_000, 512390) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(5_260_151, 0).saturating_mul(v.into())) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(3_599_219, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -700,10 +704,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 Β±0)` // Estimated: `3510 + v * (2520 Β±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_434_755_000 picoseconds. + Weight::from_parts(38_574_764, 3510) + // Standard Error: 14_467 + .saturating_add(Weight::from_parts(4_821_702, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -714,6 +718,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -724,9 +730,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_765_000 picoseconds. 
+ Weight::from_parts(6_140_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -734,6 +740,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -744,9 +752,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 5_025_000 picoseconds. + Weight::from_parts(5_354_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -774,8 +782,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 69_656_000 picoseconds. + Weight::from_parts(71_574_000, 6248) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -787,8 +795,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 12_523_000 picoseconds. + Weight::from_parts(13_315_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -798,8 +806,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_125_000 picoseconds. + Weight::from_parts(3_300_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -820,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_042_000 picoseconds. - Weight::from_parts(43_292_000, 4764) + // Minimum execution time: 41_318_000 picoseconds. + Weight::from_parts(43_268_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -841,8 +849,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 85_050_000 picoseconds. - Weight::from_parts(87_567_000, 8877) + // Minimum execution time: 85_666_000 picoseconds. 
+ Weight::from_parts(88_749_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -868,8 +876,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 89_076_000 picoseconds. - Weight::from_parts(92_715_000, 8877) + // Minimum execution time: 90_282_000 picoseconds. + Weight::from_parts(92_332_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -883,16 +891,18 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1115` + // Measured: `1297` // Estimated: `4764` - // Minimum execution time: 42_067_000 picoseconds. - Weight::from_parts(43_239_807, 4764) - // Standard Error: 831 - .saturating_add(Weight::from_parts(46_257, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Minimum execution time: 44_626_000 picoseconds. + Weight::from_parts(47_254_657, 4764) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(57_657, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Staking::Ledger` (r:1 w:1) @@ -928,10 +938,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 86_490_000 picoseconds. - Weight::from_parts(95_358_751, 6248) - // Standard Error: 3_952 - .saturating_add(Weight::from_parts(1_294_907, 0).saturating_mul(s.into())) + // Minimum execution time: 86_769_000 picoseconds. + Weight::from_parts(95_212_867, 6248) + // Standard Error: 3_706 + .saturating_add(Weight::from_parts(1_320_752, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -963,8 +973,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 50_326_000 picoseconds. - Weight::from_parts(52_253_000, 4556) + // Minimum execution time: 50_410_000 picoseconds. + Weight::from_parts(52_576_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -977,10 +987,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1280 + k * (569 Β±0)` // Estimated: `4556 + k * (3033 Β±0)` - // Minimum execution time: 29_305_000 picoseconds. - Weight::from_parts(32_199_604, 4556) - // Standard Error: 7_150 - .saturating_add(Weight::from_parts(6_437_124, 0).saturating_mul(k.into())) + // Minimum execution time: 29_017_000 picoseconds. 
+ Weight::from_parts(33_019_206, 4556) + // Standard Error: 6_368 + .saturating_add(Weight::from_parts(6_158_681, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1013,10 +1023,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 Β±0)` // Estimated: `6248 + n * (2520 Β±0)` - // Minimum execution time: 63_267_000 picoseconds. - Weight::from_parts(61_741_404, 6248) - // Standard Error: 12_955 - .saturating_add(Weight::from_parts(3_811_743, 0).saturating_mul(n.into())) + // Minimum execution time: 63_452_000 picoseconds. + Weight::from_parts(60_924_066, 6248) + // Standard Error: 14_416 + .saturating_add(Weight::from_parts(3_921_589, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1040,8 +1050,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 52_862_000 picoseconds. - Weight::from_parts(54_108_000, 6248) + // Minimum execution time: 54_253_000 picoseconds. + Weight::from_parts(55_286_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1055,8 +1065,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_350_000 picoseconds. - Weight::from_parts(16_802_000, 4556) + // Minimum execution time: 16_812_000 picoseconds. + Weight::from_parts(17_380_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1070,8 +1080,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 19_981_000 picoseconds. - Weight::from_parts(20_539_000, 4556) + // Minimum execution time: 20_590_000 picoseconds. + Weight::from_parts(21_096_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1083,8 +1093,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 19_304_000 picoseconds. - Weight::from_parts(20_000_000, 4556) + // Minimum execution time: 19_923_000 picoseconds. + Weight::from_parts(20_880_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1094,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 2_599_000 picoseconds. + Weight::from_parts(2_751_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1104,8 +1114,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_950_000 picoseconds. - Weight::from_parts(8_348_000, 0) + // Minimum execution time: 6_941_000 picoseconds. 
+ Weight::from_parts(7_288_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1114,8 +1124,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_967_000 picoseconds. - Weight::from_parts(8_222_000, 0) + // Minimum execution time: 6_757_000 picoseconds. + Weight::from_parts(7_200_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1124,8 +1134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_006_000 picoseconds. - Weight::from_parts(8_440_000, 0) + // Minimum execution time: 7_028_000 picoseconds. + Weight::from_parts(7_366_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1135,10 +1145,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_524_000 picoseconds. - Weight::from_parts(3_123_608, 0) - // Standard Error: 59 - .saturating_add(Weight::from_parts(11_596, 0).saturating_mul(v.into())) + // Minimum execution time: 2_741_000 picoseconds. + Weight::from_parts(3_212_598, 0) + // Standard Error: 68 + .saturating_add(Weight::from_parts(11_695, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Ledger` (r:5900 w:11800) @@ -1152,10 +1162,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1356 + i * (151 Β±0)` // Estimated: `990 + i * (3566 Β±0)` - // Minimum execution time: 2_092_000 picoseconds. - Weight::from_parts(2_258_000, 990) - // Standard Error: 32_695 - .saturating_add(Weight::from_parts(16_669_219, 0).saturating_mul(i.into())) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_289_000, 990) + // Standard Error: 34_227 + .saturating_add(Weight::from_parts(17_006_583, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(i.into())) @@ -1193,10 +1203,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 84_275_000 picoseconds. - Weight::from_parts(92_512_416, 6248) - // Standard Error: 3_633 - .saturating_add(Weight::from_parts(1_315_923, 0).saturating_mul(s.into())) + // Minimum execution time: 85_308_000 picoseconds. + Weight::from_parts(93_689_468, 6248) + // Standard Error: 5_425 + .saturating_add(Weight::from_parts(1_307_604, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1209,10 +1219,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 101_707_000 picoseconds. - Weight::from_parts(912_819_462, 70137) - // Standard Error: 57_547 - .saturating_add(Weight::from_parts(4_856_799, 0).saturating_mul(s.into())) + // Minimum execution time: 103_242_000 picoseconds. 
+ Weight::from_parts(1_162_296_080, 70137) + // Standard Error: 76_741 + .saturating_add(Weight::from_parts(6_487_522, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,10 +1259,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 Β±0)` // Estimated: `30944 + n * (3774 Β±0)` - // Minimum execution time: 138_657_000 picoseconds. - Weight::from_parts(167_173_445, 30944) - // Standard Error: 25_130 - .saturating_add(Weight::from_parts(44_566_012, 0).saturating_mul(n.into())) + // Minimum execution time: 140_740_000 picoseconds. + Weight::from_parts(182_886_963, 30944) + // Standard Error: 39_852 + .saturating_add(Weight::from_parts(43_140_752, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1276,10 +1286,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 Β±0)` // Estimated: `8877` - // Minimum execution time: 80_061_000 picoseconds. - Weight::from_parts(82_836_434, 8877) - // Standard Error: 4_348 - .saturating_add(Weight::from_parts(75_744, 0).saturating_mul(l.into())) + // Minimum execution time: 80_372_000 picoseconds. + Weight::from_parts(83_335_027, 8877) + // Standard Error: 4_780 + .saturating_add(Weight::from_parts(72_180, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1314,10 +1324,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 Β±0)` // Estimated: `6248 + s * (4 Β±0)` - // Minimum execution time: 92_560_000 picoseconds. - Weight::from_parts(97_684_741, 6248) - // Standard Error: 3_361 - .saturating_add(Weight::from_parts(1_292_732, 0).saturating_mul(s.into())) + // Minimum execution time: 93_920_000 picoseconds. + Weight::from_parts(98_022_911, 6248) + // Standard Error: 4_096 + .saturating_add(Weight::from_parts(1_287_745, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1363,12 +1373,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 Β±0) + v * (3598 Β±0)` // Estimated: `512390 + n * (3566 Β±0) + v * (3566 Β±0)` - // Minimum execution time: 564_963_000 picoseconds. - Weight::from_parts(569_206_000, 512390) - // Standard Error: 2_033_235 - .saturating_add(Weight::from_parts(68_025_841, 0).saturating_mul(v.into())) - // Standard Error: 202_600 - .saturating_add(Weight::from_parts(17_916_770, 0).saturating_mul(n.into())) + // Minimum execution time: 556_012_000 picoseconds. 
+ Weight::from_parts(560_339_000, 512390) + // Standard Error: 2_115_076 + .saturating_add(Weight::from_parts(69_456_497, 0).saturating_mul(v.into())) + // Standard Error: 210_755 + .saturating_add(Weight::from_parts(17_974_873, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1399,12 +1409,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 Β±0) + v * (395 Β±0)` // Estimated: `512390 + n * (3566 Β±0) + v * (3566 Β±0)` - // Minimum execution time: 32_196_540_000 picoseconds. - Weight::from_parts(32_341_871_000, 512390) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(5_143_440, 0).saturating_mul(v.into())) - // Standard Error: 354_657 - .saturating_add(Weight::from_parts(3_328_189, 0).saturating_mul(n.into())) + // Minimum execution time: 32_792_819_000 picoseconds. + Weight::from_parts(32_986_606_000, 512390) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(5_260_151, 0).saturating_mul(v.into())) + // Standard Error: 360_905 + .saturating_add(Weight::from_parts(3_599_219, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1421,10 +1431,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 Β±0)` // Estimated: `3510 + v * (2520 Β±0)` - // Minimum execution time: 2_381_903_000 picoseconds. - Weight::from_parts(32_693_059, 3510) - // Standard Error: 10_000 - .saturating_add(Weight::from_parts(4_736_173, 0).saturating_mul(v.into())) + // Minimum execution time: 2_434_755_000 picoseconds. + Weight::from_parts(38_574_764, 3510) + // Standard Error: 14_467 + .saturating_add(Weight::from_parts(4_821_702, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1435,6 +1445,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1445,9 +1457,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_434_000 picoseconds. - Weight::from_parts(5_742_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_765_000 picoseconds. 
+ Weight::from_parts(6_140_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -1455,6 +1467,8 @@ impl WeightInfo for () { /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::ChillThreshold` (r:0 w:1) /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) @@ -1465,9 +1479,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(4_854_000, 0) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 5_025_000 picoseconds. + Weight::from_parts(5_354_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) @@ -1495,8 +1509,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 68_780_000 picoseconds. - Weight::from_parts(71_479_000, 6248) + // Minimum execution time: 69_656_000 picoseconds. + Weight::from_parts(71_574_000, 6248) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1508,8 +1522,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_661_000, 3510) + // Minimum execution time: 12_523_000 picoseconds. + Weight::from_parts(13_315_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1519,8 +1533,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_071_000 picoseconds. - Weight::from_parts(3_334_000, 0) + // Minimum execution time: 3_125_000 picoseconds. + Weight::from_parts(3_300_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 6b3aa9934e0..b89159ef3ec 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1801,7 +1801,7 @@ mod remote_tests_local { use std::env::var as env_var; // we only use the hash type from this, so using the mock should be fine. 
- type Extrinsic = sp_runtime::testing::TestXt; + type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; type Block = sp_runtime::testing::Block; #[tokio::test] diff --git a/substrate/frame/state-trie-migration/src/weights.rs b/substrate/frame/state-trie-migration/src/weights.rs index 8fa80b38957..23dad9e3380 100644 --- a/substrate/frame/state-trie-migration/src/weights.rs +++ b/substrate/frame/state-trie-migration/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_state_trie_migration` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_state_trie_migration +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_state_trie_migration -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/state-trie-migration/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -64,15 +66,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3640` - // Minimum execution time: 18_520_000 picoseconds. - Weight::from_parts(19_171_000, 3640) + // Estimated: `3658` + // Minimum execution time: 17_661_000 picoseconds. + Weight::from_parts(18_077_000, 3658) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,53 +84,53 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 3_786_000 picoseconds. - Weight::from_parts(4_038_000, 1493) + // Minimum execution time: 4_036_000 picoseconds. 
+ Weight::from_parts(4_267_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_144_000 picoseconds. - Weight::from_parts(11_556_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_348_000 picoseconds. + Weight::from_parts(11_899_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3640` - // Minimum execution time: 59_288_000 picoseconds. - Weight::from_parts(60_276_000, 3640) + // Estimated: `3658` + // Minimum execution time: 59_819_000 picoseconds. + Weight::from_parts(61_066_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_258_000 picoseconds. - Weight::from_parts(11_626_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_424_000 picoseconds. + Weight::from_parts(11_765_000, 3658) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3640` - // Minimum execution time: 61_575_000 picoseconds. - Weight::from_parts(63_454_000, 3640) + // Estimated: `3658` + // Minimum execution time: 61_086_000 picoseconds. + Weight::from_parts(62_546_000, 3658) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,10 +141,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `197 + v * (1 Β±0)` // Estimated: `3662 + v * (1 Β±0)` - // Minimum execution time: 5_259_000 picoseconds. - Weight::from_parts(5_433_000, 3662) + // Minimum execution time: 5_375_000 picoseconds. 
+ Weight::from_parts(5_552_000, 3662) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_159, 0).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(1_146, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -154,15 +156,15 @@ impl WeightInfo for () { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3640` - // Minimum execution time: 18_520_000 picoseconds. - Weight::from_parts(19_171_000, 3640) + // Estimated: `3658` + // Minimum execution time: 17_661_000 picoseconds. + Weight::from_parts(18_077_000, 3658) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -172,53 +174,53 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 3_786_000 picoseconds. - Weight::from_parts(4_038_000, 1493) + // Minimum execution time: 4_036_000 picoseconds. + Weight::from_parts(4_267_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_144_000 picoseconds. - Weight::from_parts(11_556_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_348_000 picoseconds. + Weight::from_parts(11_899_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3640` - // Minimum execution time: 59_288_000 picoseconds. - Weight::from_parts(60_276_000, 3640) + // Estimated: `3658` + // Minimum execution time: 59_819_000 picoseconds. 
+ Weight::from_parts(61_066_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3640` - // Minimum execution time: 11_258_000 picoseconds. - Weight::from_parts(11_626_000, 3640) + // Estimated: `3658` + // Minimum execution time: 11_424_000 picoseconds. + Weight::from_parts(11_765_000, 3658) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(175), added: 2650, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3640` - // Minimum execution time: 61_575_000 picoseconds. - Weight::from_parts(63_454_000, 3640) + // Estimated: `3658` + // Minimum execution time: 61_086_000 picoseconds. + Weight::from_parts(62_546_000, 3658) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -229,10 +231,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `197 + v * (1 Β±0)` // Estimated: `3662 + v * (1 Β±0)` - // Minimum execution time: 5_259_000 picoseconds. - Weight::from_parts(5_433_000, 3662) + // Minimum execution time: 5_375_000 picoseconds. 
+ Weight::from_parts(5_552_000, 3662) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_159, 0).saturating_mul(v.into())) + .saturating_add(Weight::from_parts(1_146, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index e64233fe748..cdd7d707600 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -20,14 +20,23 @@ use super::*; use crate::Pallet; use frame_benchmarking::v2::*; +use frame_support::dispatch::DispatchInfo; use frame_system::RawOrigin; +use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable}; fn assert_last_event(generic_event: crate::Event) { let re: ::RuntimeEvent = generic_event.into(); frame_system::Pallet::::assert_last_event(re.into()); } -#[benchmarks(where ::RuntimeCall: From>)] +#[benchmarks(where + T: Send + Sync, + ::RuntimeCall: From>, + ::RuntimeCall: Dispatchable, + <::RuntimeCall as Dispatchable>::PostInfo: From<()>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, +)] mod benchmarks { use super::*; @@ -85,5 +94,23 @@ mod benchmarks { assert_last_event::(Event::KeyRemoved {}); } + #[benchmark] + fn check_only_sudo_account() { + let caller: T::AccountId = whitelisted_caller(); + Key::::put(&caller); + + let call = frame_system::Call::remark { remark: vec![] }.into(); + let info = DispatchInfo { ..Default::default() }; + let ext = CheckOnlySudoAccount::::new(); + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(().into())) + .unwrap() + .is_ok()); + } + } + impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), crate::mock::Test); } diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index e90286e5a7c..2cd8f945edd 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -20,10 +20,14 @@ use codec::{Decode, Encode}; use frame_support::{dispatch::DispatchInfo, ensure}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, + impl_tx_ext_default, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, TransactionExtension, + TransactionExtensionBase, + }, transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, - UnknownTransaction, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionValidityError, UnknownTransaction, + ValidTransaction, }, }; use sp_std::{fmt, marker::PhantomData}; @@ -59,49 +63,61 @@ impl fmt::Debug for CheckOnlySudoAccount { } impl CheckOnlySudoAccount { - /// Creates new `SignedExtension` to check sudo key. + /// Creates new `TransactionExtension` to check sudo key. 
pub fn new() -> Self { Self::default() } } -impl SignedExtension for CheckOnlySudoAccount -where - ::RuntimeCall: Dispatchable, -{ +impl TransactionExtensionBase for CheckOnlySudoAccount { const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); + type Implicit = (); - fn additional_signed(&self) -> Result { - Ok(()) + fn weight(&self) -> frame_support::weights::Weight { + use crate::weights::WeightInfo; + T::WeightInfo::check_only_sudo_account() } +} +impl + TransactionExtension<::RuntimeCall, Context> for CheckOnlySudoAccount +where + ::RuntimeCall: Dispatchable, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, +{ + type Pre = (); + type Val = (); fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + origin: <::RuntimeCall as Dispatchable>::RuntimeOrigin, + _call: &::RuntimeCall, + info: &DispatchInfoOf<::RuntimeCall>, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + ( + ValidTransaction, + Self::Val, + <::RuntimeCall as Dispatchable>::RuntimeOrigin, + ), + TransactionValidityError, + > { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; let sudo_key: T::AccountId = Key::::get().ok_or(UnknownTransaction::CannotLookup)?; ensure!(*who == sudo_key, InvalidTransaction::BadSigner); - Ok(ValidTransaction { - priority: info.weight.ref_time() as TransactionPriority, - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: info.weight.ref_time() as TransactionPriority, + ..Default::default() + }, + (), + origin, + )) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(::RuntimeCall; Context; prepare); } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 2ebe4cb0157..2c953368b22 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -85,8 +85,8 @@ //! meant to be used by constructing runtime calls from outside the runtime. //! //! -//! This pallet also defines a [`SignedExtension`](sp_runtime::traits::SignedExtension) called -//! [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are +//! This pallet also defines a [`TransactionExtension`](sp_runtime::traits::TransactionExtension) +//! called [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are //! accepted by the transaction pool. The intended use of this signed extension is to prevent other //! accounts from spamming the transaction pool for the initial phase of a chain, during which //! developers may only want a sudo account to be able to make transactions. diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index 10d1a9f7a51..aa8f69fd4e6 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_sudo` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_sudo +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/sudo/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -53,6 +55,7 @@ pub trait WeightInfo { fn sudo() -> Weight; fn sudo_as() -> Weight; fn remove_key() -> Weight; + fn check_only_sudo_account() -> Weight; } /// Weights for `pallet_sudo` using the Substrate node and recommended hardware. @@ -64,8 +67,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_600_000 picoseconds. - Weight::from_parts(10_076_000, 1517) + // Minimum execution time: 9_590_000 picoseconds. + Weight::from_parts(9_924_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +78,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_453_000 picoseconds. - Weight::from_parts(10_931_000, 1517) + // Minimum execution time: 9_825_000 picoseconds. + Weight::from_parts(10_373_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) @@ -85,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_202_000 picoseconds. - Weight::from_parts(10_800_000, 1517) + // Minimum execution time: 10_140_000 picoseconds. + Weight::from_parts(10_382_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) @@ -95,11 +98,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 8_555_000 picoseconds. - Weight::from_parts(8_846_000, 1517) + // Minimum execution time: 8_610_000 picoseconds. + Weight::from_parts(8_975_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. + Weight::from_parts(3_645_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } } // For backwards compatibility and tests. @@ -110,8 +123,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 9_600_000 picoseconds. 
- Weight::from_parts(10_076_000, 1517) + // Minimum execution time: 9_590_000 picoseconds. + Weight::from_parts(9_924_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -121,8 +134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_453_000 picoseconds. - Weight::from_parts(10_931_000, 1517) + // Minimum execution time: 9_825_000 picoseconds. + Weight::from_parts(10_373_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) @@ -131,8 +144,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 10_202_000 picoseconds. - Weight::from_parts(10_800_000, 1517) + // Minimum execution time: 10_140_000 picoseconds. + Weight::from_parts(10_382_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) @@ -141,9 +154,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 8_555_000 picoseconds. - Weight::from_parts(8_846_000, 1517) + // Minimum execution time: 8_610_000 picoseconds. + Weight::from_parts(8_975_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. + Weight::from_parts(3_645_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index fb9cb3d4511..113af41751e 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -95,9 +95,9 @@ std = [ "sp-staking/std", "sp-state-machine/std", "sp-std/std", + "sp-timestamp/std", "sp-tracing/std", "sp-weights/std", - "sp-timestamp/std" ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index da483fa6cf0..d21cfef4a92 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -73,11 +73,9 @@ pub fn expand_outer_inherent( #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new( + let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new_inherent( inherent.into(), - None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ - `Some`; qed"); + ); inherents.push(inherent); } @@ -123,7 +121,7 @@ pub fn expand_outer_inherent( for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. 
- if #scrate::sp_runtime::traits::Extrinsic::is_signed(xt).unwrap_or(false) { + if !(#scrate::sp_runtime::traits::Extrinsic::is_bare(xt)) { break } @@ -161,10 +159,9 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); + let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(xt); - if !is_signed { + if is_bare { let call = < #unchecked_extrinsic as ExtrinsicCall >::call(xt); @@ -209,8 +206,9 @@ pub fn expand_outer_inherent( use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; - if #scrate::sp_runtime::traits::Extrinsic::is_signed(ext).unwrap_or(false) { - // Signed extrinsics are never inherents. + let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(ext); + if !is_bare { + // Signed extrinsics are not inherents. return false } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 0e76f9a9246..f55ca677d89 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -119,13 +119,13 @@ pub fn expand_runtime_metadata( call_ty, signature_ty, extra_ty, - signed_extensions: < + extensions: < < #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension + >::Extra as #scrate::sp_runtime::traits::TransactionExtensionBase >::metadata() .into_iter() - .map(|meta| #scrate::__private::metadata_ir::SignedExtensionMetadataIR { + .map(|meta| #scrate::__private::metadata_ir::TransactionExtensionMetadataIR { identifier: meta.identifier, ty: meta.ty, additional_signed: meta.additional_signed, diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 83049919d01..341621deb09 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -153,6 +153,10 @@ pub fn expand_outer_origin( self.filter = #scrate::__private::sp_std::rc::Rc::new(Box::new(filter)); } + fn set_caller(&mut self, caller: OriginCaller) { + self.caller = caller; + } + fn set_caller_from(&mut self, other: impl Into) { self.caller = other.into().caller; } @@ -301,6 +305,16 @@ pub fn expand_outer_origin( } } + impl #scrate::__private::AsSystemOriginSigner<<#runtime as #system_path::Config>::AccountId> for RuntimeOrigin { + fn as_system_origin_signer(&self) -> Option<&<#runtime as #system_path::Config>::AccountId> { + if let OriginCaller::system(#system_path::Origin::<#runtime>::Signed(ref signed)) = &self.caller { + Some(signed) + } else { + None + } + } + } + #pallet_conversions }) } diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 4a313551aca..61787a74012 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -25,7 +25,7 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::SignedExtension, + traits::Dispatchable, DispatchError, RuntimeDebug, }; use sp_std::fmt; @@ -268,7 +268,8 @@ pub fn 
extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default +/// weight. pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { match result { Ok(post_info) => post_info, @@ -368,11 +369,10 @@ where } /// Implementation for unchecked extrinsic. -impl GetDispatchInfo - for UncheckedExtrinsic +impl GetDispatchInfo + for UncheckedExtrinsic where - Call: GetDispatchInfo, - Extra: SignedExtension, + Call: GetDispatchInfo + Dispatchable, { fn get_dispatch_info(&self) -> DispatchInfo { self.function.get_dispatch_info() @@ -380,7 +380,7 @@ where } /// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic +impl GetDispatchInfo for CheckedExtrinsic where Call: GetDispatchInfo, { @@ -389,21 +389,6 @@ where } } -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo - for sp_runtime::testing::TestXt -{ - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_parts(self.encode().len() as _, 0), - pays_fee: Pays::Yes, - class: self.call.get_dispatch_info().class, - } - } -} - /// A struct holding value for each `DispatchClass`. #[derive(Clone, Eq, PartialEq, Default, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index caf9457d666..b5b1ac09c60 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -54,7 +54,8 @@ pub mod __private { #[cfg(feature = "std")] pub use sp_runtime::{bounded_btree_map, bounded_vec}; pub use sp_runtime::{ - traits::Dispatchable, DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, + traits::{AsSystemOriginSigner, Dispatchable}, + DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, }; #[cfg(feature = "std")] pub use sp_state_machine::BasicExternalities; @@ -75,6 +76,7 @@ pub mod storage; #[cfg(test)] mod tests; pub mod traits; +pub mod transaction_extensions; pub mod weights; #[doc(hidden)] pub mod unsigned { @@ -1602,8 +1604,8 @@ pub mod pallet_macros { /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for /// type `Pallet`, and some optional where clause. /// - /// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used - /// to add some specific logic for transaction validation. + /// NOTE: There is also the [`sp_runtime::traits::TransactionExtension`] trait that can be + /// used to add some specific logic for transaction validation. /// /// ## Macro expansion /// diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index de50ce7a26c..212c3080352 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -482,7 +482,7 @@ pub trait OriginTrait: Sized { type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; + type PalletsOrigin: Send + Sync + Into + CallerTrait + MaxEncodedLen; /// The AccountId used across the system. 
 	type AccountId;
@@ -496,6 +496,14 @@ pub trait OriginTrait: Sized {
 	/// Replace the caller with caller from the other origin
 	fn set_caller_from(&mut self, other: impl Into);
 
+	/// Replace the caller.
+	fn set_caller(&mut self, caller: Self::PalletsOrigin);
+
+	/// Replace the caller with a `Signed` origin for the given account.
+	fn set_caller_from_signed(&mut self, caller_account: Self::AccountId) {
+		self.set_caller(Self::PalletsOrigin::from(RawOrigin::Signed(caller_account)))
+	}
+
 	/// Filter the call if caller is not root, if false is returned then the call must be filtered
 	/// out.
 	///
@@ -544,6 +552,17 @@ pub trait OriginTrait: Sized {
 	fn as_system_ref(&self) -> Option<&RawOrigin> {
 		self.caller().as_system_ref()
 	}
+
+	/// Extract a reference to the system signer, if that's what the caller is.
+	fn as_system_signer(&self) -> Option<&Self::AccountId> {
+		self.caller().as_system_ref().and_then(|s| {
+			if let RawOrigin::Signed(ref who) = s {
+				Some(who)
+			} else {
+				None
+			}
+		})
+	}
 }
 
 #[cfg(test)]
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index 1f634a64282..fba546ce21b 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -919,24 +919,13 @@ pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic {
 	fn call(&self) -> &Self::Call;
 }
 
-#[cfg(feature = "std")]
-impl ExtrinsicCall for sp_runtime::testing::TestXt
-where
-	Call: codec::Codec + Sync + Send + TypeInfo,
-	Extra: TypeInfo,
-{
-	fn call(&self) -> &Self::Call {
-		&self.call
-	}
-}
-
 impl ExtrinsicCall
 	for sp_runtime::generic::UncheckedExtrinsic
 where
 	Address: TypeInfo,
 	Call: TypeInfo,
 	Signature: TypeInfo,
-	Extra: sp_runtime::traits::SignedExtension + TypeInfo,
+	Extra: TypeInfo,
 {
 	fn call(&self) -> &Self::Call {
 		&self.function
diff --git a/substrate/frame/support/src/transaction_extensions.rs b/substrate/frame/support/src/transaction_extensions.rs
new file mode 100644
index 00000000000..98d78b9b4e4
--- /dev/null
+++ b/substrate/frame/support/src/transaction_extensions.rs
@@ -0,0 +1,89 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Transaction extensions.
+
+use crate::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
+use codec::{Decode, Encode};
+use scale_info::TypeInfo;
+use sp_io::hashing::blake2_256;
+use sp_runtime::{
+	impl_tx_ext_default,
+	traits::{
+		transaction_extension::{TransactionExtensionBase, TransactionExtensionInterior},
+		DispatchInfoOf, Dispatchable, IdentifyAccount, TransactionExtension, Verify,
+	},
+	transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction},
+};
+
+#[derive(
+	CloneNoBound, EqNoBound, PartialEqNoBound, Encode, Decode, RuntimeDebugNoBound, TypeInfo,
+)]
+#[codec(encode_bound())]
+#[codec(decode_bound())]
+pub struct VerifyMultiSignature
+where
+	V: TransactionExtensionInterior,
+	::AccountId: TransactionExtensionInterior,
+{
+	signature: V,
+	account: ::AccountId,
+}
+
+impl TransactionExtensionBase for VerifyMultiSignature
+where
+	V: TransactionExtensionInterior,
+	::AccountId: TransactionExtensionInterior,
+{
+	const IDENTIFIER: &'static str = "VerifyMultiSignature";
+	type Implicit = ();
+}
+
+impl TransactionExtension
+	for VerifyMultiSignature
+where
+	V: TransactionExtensionInterior,
+	::AccountId: TransactionExtensionInterior,
+	::RuntimeOrigin: From::AccountId>>,
+{
+	type Val = ();
+	type Pre = ();
+	impl_tx_ext_default!(Call; Context; prepare);
+
+	fn validate(
+		&self,
+		_origin: ::RuntimeOrigin,
+		_call: &Call,
+		_info: &DispatchInfoOf,
+		_len: usize,
+		_: &mut Context,
+		_: (),
+		inherited_implication: &impl Encode,
+	) -> Result<
+		(ValidTransaction, Self::Val, ::RuntimeOrigin),
+		TransactionValidityError,
+	> {
+		let msg = inherited_implication.using_encoded(blake2_256);
+
+		if !self.signature.verify(&msg[..], &self.account) {
+			Err(InvalidTransaction::BadProof)?
+		}
+		// We clobber the original origin. Maybe we should check that it's none?
+		let origin = Some(self.account.clone()).into();
+		Ok((ValidTransaction::default(), (), origin))
+	}
+}
diff --git a/substrate/frame/support/src/weights/block_weights.rs b/substrate/frame/support/src/weights/block_weights.rs
index 57a68554755..647b6198357 100644
--- a/substrate/frame/support/src/weights/block_weights.rs
+++ b/substrate/frame/support/src/weights/block_weights.rs
@@ -15,24 +15,23 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-16 (Y/M/D)
-//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-03-01 (Y/M/D)
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //!
 //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development`
 //! WARMUPS: `10`, REPEAT: `100`
-//! WEIGHT-PATH: `./frame/support/src/weights/`
+//! WEIGHT-PATH: `./substrate/frame/support/src/weights/`
 //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0`
 
 // Executed Command:
-// ./target/production/substrate
+// ./target/production/substrate-node
 // benchmark
 // overhead
 // --chain=dev
-// --execution=wasm
 // --wasm-execution=compiled
-// --weight-path=./frame/support/src/weights/
-// --header=./HEADER-APACHE2
+// --weight-path=./substrate/frame/support/src/weights/
+// --header=./substrate/HEADER-APACHE2
 // --warmup=10
 // --repeat=100
 
@@ -44,17 +43,17 @@ parameter_types! {
 	/// Calculated by multiplying the *Average* with `1.0` and adding `0`.
/// /// Stats nanoseconds: - /// Min, Max: 376_949, 622_462 - /// Average: 390_584 - /// Median: 386_322 - /// Std-Dev: 24792.0 + /// Min, Max: 424_332, 493_017 + /// Average: 437_118 + /// Median: 434_920 + /// Std-Dev: 8798.01 /// /// Percentiles nanoseconds: - /// 99th: 433_299 - /// 95th: 402_688 - /// 75th: 391_645 + /// 99th: 460_074 + /// 95th: 451_580 + /// 75th: 440_307 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(390_584), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(437_118), 0); } #[cfg(test)] diff --git a/substrate/frame/support/src/weights/extrinsic_weights.rs b/substrate/frame/support/src/weights/extrinsic_weights.rs index a304f089ff7..99b392c4369 100644 --- a/substrate/frame/support/src/weights/extrinsic_weights.rs +++ b/substrate/frame/support/src/weights/extrinsic_weights.rs @@ -15,24 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16 (Y/M/D) -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `./frame/support/src/weights/` +//! WEIGHT-PATH: `./substrate/frame/support/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // overhead // --chain=dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=./frame/support/src/weights/ -// --header=./HEADER-APACHE2 +// --weight-path=./substrate/frame/support/src/weights/ +// --header=./substrate/HEADER-APACHE2 // --warmup=10 // --repeat=100 @@ -44,17 +43,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 123_875, 128_419 - /// Average: 124_414 - /// Median: 124_332 - /// Std-Dev: 497.74 + /// Min, Max: 106_053, 107_403 + /// Average: 106_446 + /// Median: 106_415 + /// Std-Dev: 216.17 /// /// Percentiles nanoseconds: - /// 99th: 125_245 - /// 95th: 124_989 - /// 75th: 124_498 + /// 99th: 107_042 + /// 95th: 106_841 + /// 75th: 106_544 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(124_414), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(106_446), 0); } #[cfg(test)] diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index b7698681886..83691451ccd 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -815,7 +815,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 61cfc07caed..9dd460da75a 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -477,7 +477,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... | note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + --> $WORKSPACE/substrate/primitives/runtime/src/traits/mod.rs | | type Config; | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` @@ -510,7 +510,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... 
| note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + --> $WORKSPACE/substrate/primitives/runtime/src/traits/mod.rs | | type Call; | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 607f54ccc13..4d2737d411e 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -743,10 +743,40 @@ impl pallet5::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; } +#[derive(Clone, Debug, codec::Encode, codec::Decode, PartialEq, Eq, scale_info::TypeInfo)] +pub struct AccountU64(u64); +impl sp_runtime::traits::IdentifyAccount for AccountU64 { + type AccountId = u64; + fn into_account(self) -> u64 { + self.0 + } +} + +impl sp_runtime::traits::Verify for AccountU64 { + type Signer = AccountU64; + fn verify>( + &self, + _msg: L, + _signer: &::AccountId, + ) -> bool { + true + } +} + +impl From for AccountU64 { + fn from(value: u64) -> Self { + Self(value) + } +} + pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + AccountU64, + frame_system::CheckNonZeroSender, +>; frame_support::construct_runtime!( pub struct Runtime { @@ -896,10 +926,8 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); - let expected = vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }]; + let expected = + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {}))]; assert_eq!(expected, inherents); let block = Block::new( @@ -911,14 +939,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 0, + })), ], ); @@ -933,14 +958,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 0, + bar: 0, + })), ], ); @@ -954,10 +976,9 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }], + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + }))], ); let mut inherent = InherentData::new(); @@ -972,10 +993,12 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: Some((1, Default::default())), - }], + vec![UncheckedExtrinsic::new_signed( + 
RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + 1, + 1.into(), + Default::default(), + )], ); let mut inherent = InherentData::new(); @@ -991,14 +1014,13 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), ], ); @@ -1013,18 +1035,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1039,18 +1057,17 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: Some((1, Default::default())), - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + 1, + 1.into(), + Default::default(), + ), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1933,7 +1950,7 @@ fn extrinsic_metadata_ir_types() { >(), ir.signature_ty ); - assert_eq!(meta_type::<()>(), ir.signature_ty); + assert_eq!(meta_type::(), ir.signature_ty); assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); assert_eq!(meta_type::>(), ir.extra_ty); diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index f8cc97623b8..505400a7c21 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -943,7 +943,7 @@ fn metadata() { ty: scale_info::meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: scale_info::meta_type::<()>(), additional_signed: scale_info::meta_type::<()>(), }], diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs new file mode 100644 index 00000000000..1721501dead --- /dev/null +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Benchmarks for System Extensions + +#![cfg(feature = "runtime-benchmarks")] + +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; +use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::Weight, +}; +use frame_system::{ + pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce, + CheckSpecVersion, CheckTxVersion, CheckWeight, Config, Pallet as System, RawOrigin, +}; +use sp_runtime::{ + generic::Era, + traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable, Get}, +}; +use sp_std::prelude::*; + +pub struct Pallet(System); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone) +] +mod benchmarks { + use super::*; + + #[benchmark] + fn check_genesis() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckGenesis::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn check_mortality() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::mortal(16, 256)); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let caller = account("caller", 0, 0); + let info = DispatchInfo { + weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_non_zero_sender() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckNonZeroSender::::new(); + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_nonce() -> Result<(), BenchmarkError> { + let caller: T::AccountId = account("caller", 0, 0); + let mut info = frame_system::AccountInfo::default(); + info.nonce = 1u32.into(); + info.providers = 1; + let expected_nonce = info.nonce + 1u32.into(); + frame_system::Account::::insert(caller.clone(), info); + let len = 0_usize; + let ext = CheckNonce::::from(1u32.into()); + let 
info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, |_| { + Ok(().into()) + }) + .unwrap() + .unwrap(); + } + + let updated_info = frame_system::Account::::get(caller.clone()); + assert_eq!(updated_info.nonce, expected_nonce); + Ok(()) + } + + #[benchmark] + fn check_spec_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckSpecVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_tx_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckTxVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let info = DispatchInfo { + weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + + let ext = CheckWeight::::new(); + + let initial_block_weight = Weight::from_parts(base_extrinsic.ref_time() * 2, 0); + frame_system::BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(initial_block_weight, DispatchClass::Normal); + }); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!( + System::::block_weight().total(), + initial_block_weight + base_extrinsic + post_info.actual_weight.unwrap(), + ); + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); +} diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index 18bfb85f52d..ebd16275bcb 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -28,6 +28,7 @@ use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; use sp_std::{prelude::*, vec}; +pub mod extensions; mod mock; pub struct Pallet(System); diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index edbf74a9a51..1f84dd53004 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -57,6 +57,7 @@ impl frame_system::Config for Test { type OnNewAccount = (); type OnKilledAccount = (); type 
SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 76a711a823e..3f11f745cd9 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -19,7 +19,8 @@ use crate::{pallet_prelude::BlockNumberFor, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{TransactionExtension, TransactionExtensionBase, Zero}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,26 @@ impl sp_std::fmt::Debug for CheckGenesis { } impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. + /// Creates new `TransactionExtension` to check genesis hash. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtensionBase for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { + type Implicit = T::Hash; + fn implicit(&self) -> Result { Ok(>::block_hash(BlockNumberFor::::zero())) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_genesis() } } +impl TransactionExtension + for CheckGenesis +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(T::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 148dfd4aad4..deb44880d64 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -20,10 +20,12 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ generic::Era, - traits::{DispatchInfoOf, SaturatedConversion, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + DispatchInfoOf, SaturatedConversion, TransactionExtension, TransactionExtensionBase, + ValidateResult, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; /// Check for transaction mortality. 
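The `CheckGenesis` hunks above, and the remaining `frame_system` extension files below (`CheckMortality`, `CheckNonZeroSender`, `CheckNonce`, `CheckSpecVersion`, `CheckTxVersion`, `CheckWeight`), all follow the same migration shape: the single `SignedExtension` impl is split into a call-independent `TransactionExtensionBase` impl and a call-dependent `TransactionExtension` impl. The sketch below is illustrative only and is not part of this patch; it assumes the trait shapes used in these hunks (still in flux at the time of this change), and `NoopExtension` is a hypothetical placeholder.

// Illustrative sketch (not part of the patch): the recurring migration shape.
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{TransactionExtension, TransactionExtensionBase},
};

#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct NoopExtension<T: frame_system::Config + Send + Sync>(sp_std::marker::PhantomData<T>);
// A `Debug` impl is also required; the real extensions in these hunks provide it manually.

// Call-independent pieces, formerly part of `SignedExtension`: the identifier,
// the implicit data (the old `AdditionalSigned`) and a benchmarked weight.
impl<T: frame_system::Config + Send + Sync> TransactionExtensionBase for NoopExtension<T> {
	const IDENTIFIER: &'static str = "NoopExtension";
	type Implicit = ();
	fn weight(&self) -> sp_weights::Weight {
		sp_weights::Weight::zero()
	}
}

// Call-dependent pipeline: `validate` passes `Val` to `prepare`, which passes `Pre`
// to `post_dispatch`. Steps that are not needed take the generated defaults.
impl<T: frame_system::Config + Send + Sync, Context> TransactionExtension<T::RuntimeCall, Context>
	for NoopExtension<T>
{
	type Val = ();
	type Pre = ();
	impl_tx_ext_default!(T::RuntimeCall; Context; validate prepare);
}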
@@ -54,29 +56,11 @@ impl sp_std::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtensionBase for CheckMortality { const IDENTIFIER: &'static str = "CheckMortality"; + type Implicit = T::Hash; - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::>(); if !>::contains_key(n) { @@ -85,16 +69,38 @@ impl SignedExtension for CheckMortality { Ok(>::block_hash(n)) } } + fn weight(&self) -> sp_weights::Weight { + ::check_mortality() + } +} +impl TransactionExtension + for CheckMortality +{ + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(( + ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }, + (), + origin, + )) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(test)] @@ -106,23 +112,21 @@ mod tests { weights::Weight, }; use sp_core::H256; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_era_should_work() { new_test_ext().execute_with(|| { // future assert_eq!( - CheckMortality::::from(Era::mortal(4, 2)) - .additional_signed() - .err() - .unwrap(), + CheckMortality::::from(Era::mortal(4, 2)).implicit().err().unwrap(), InvalidTransaction::AncientBirthBlock.into(), ); // correct System::set_block_number(13); >::insert(12, H256::repeat_byte(1)); - assert!(CheckMortality::::from(Era::mortal(4, 12)).additional_signed().is_ok()); + assert!(CheckMortality::::from(Era::mortal(4, 12)).implicit().is_ok()); }) } @@ -142,7 +146,10 @@ mod tests { System::set_block_number(17); >::insert(16, H256::repeat_byte(1)); - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + assert_eq!( + ext.validate_only(Some(1).into(), CALL, &normal, len).unwrap().0.longevity, + 15 + ); }) } } diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 92eed60fc66..115ffac9b03 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -17,13 +17,14 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::{dispatch::DispatchInfo, DefaultNoBound}; +use frame_support::{traits::OriginTrait, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, 
TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + transaction_extension::TransactionExtensionBase, DispatchInfoOf, TransactionExtension, }, + transaction_validity::InvalidTransaction, }; use sp_std::{marker::PhantomData, prelude::*}; @@ -45,66 +46,82 @@ impl sp_std::fmt::Debug for CheckNonZeroSender { } impl CheckNonZeroSender { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckNonZeroSender -where - T::RuntimeCall: Dispatchable, -{ - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtensionBase for CheckNonZeroSender { const IDENTIFIER: &'static str = "CheckNonZeroSender"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + fn weight(&self) -> sp_weights::Weight { + ::check_non_zero_sender() } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - +} +impl TransactionExtension + for CheckNonZeroSender +{ + type Val = (); + type Pre = (); fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> sp_runtime::traits::ValidateResult { + if let Some(who) = origin.as_system_signer() { + if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { + return Err(InvalidTransaction::BadSigner.into()) + } } - Ok(ValidTransaction::default()) + Ok((Default::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; Context; prepare); } #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::{assert_ok, dispatch::DispatchInfo}; + use sp_runtime::{traits::DispatchTransaction, TransactionValidityError}; #[test] fn zero_account_ban_works() { new_test_ext().execute_with(|| { let info = DispatchInfo::default(); let len = 0_usize; - assert_noop!( - CheckNonZeroSender::::new().validate(&0, CALL, &info, len), - InvalidTransaction::BadSigner + assert_eq!( + CheckNonZeroSender::::new() + .validate_only(Some(0).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::from(InvalidTransaction::BadSigner) ); - assert_ok!(CheckNonZeroSender::::new().validate(&1, CALL, &info, len)); + assert_ok!(CheckNonZeroSender::::new().validate_only( + Some(1).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_origin_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + assert_ok!(CheckNonZeroSender::::new().validate_only( + None.into(), + CALL, + &info, + len + )); }) } } diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 7504a814aef..ead9d9f7c81 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -15,16 +15,19 @@ // See the License for the 
specific language governing permissions and // limitations under the License. -use crate::Config; +use crate::{AccountInfo, Config}; use codec::{Decode, Encode}; use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, One, TransactionExtension, + TransactionExtensionBase, ValidateResult, Zero, + }, transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, - ValidTransaction, + InvalidTransaction, TransactionLongevity, TransactionValidityError, ValidTransaction, }, + Saturating, }; use sp_std::vec; @@ -58,75 +61,78 @@ impl sp_std::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce +impl TransactionExtensionBase for CheckNonce { + const IDENTIFIER: &'static str = "CheckNonce"; + type Implicit = (); + fn weight(&self) -> sp_weights::Weight { + ::check_nonce() + } +} +impl TransactionExtension for CheckNonce where T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = Option<(T::AccountId, AccountInfo)>; type Pre = (); - const IDENTIFIER: &'static str = "CheckNonce"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = crate::Account::::get(who); - if account.providers.is_zero() && account.sufficients.is_zero() { - // Nonce storage not paid for - return Err(InvalidTransaction::Payment.into()) - } - if self.0 != account.nonce { - return Err(if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - } - .into()) - } - account.nonce += T::Nonce::one(); - crate::Account::::insert(who, account); - Ok(()) - } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), None, origin)) + }; let account = crate::Account::::get(who); if account.providers.is_zero() && account.sufficients.is_zero() { // Nonce storage not paid for - return InvalidTransaction::Payment.into() + return Err(InvalidTransaction::Payment.into()) } if self.0 < account.nonce { - return InvalidTransaction::Stale.into() + return Err(InvalidTransaction::Stale.into()) } - let provides = vec![Encode::encode(&(who, self.0))]; + let provides = vec![Encode::encode(&(who.clone(), self.0))]; let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] + vec![Encode::encode(&(who.clone(), self.0.saturating_sub(One::one())))] } else { vec![] }; - Ok(ValidTransaction { + let validity = ValidTransaction { priority: 0, requires, provides, longevity: TransactionLongevity::max_value(), propagate: true, - }) + }; + + Ok((validity, Some((who.clone(), account)), origin)) + } + + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _context: 
&Context, + ) -> Result { + let Some((who, mut account)) = val else { return Ok(()) }; + // `self.0 < account.nonce` already checked in `validate`. + if self.0 > account.nonce { + return Err(InvalidTransaction::Future.into()) + } + account.nonce.saturating_inc(); + crate::Account::::insert(who, account); + Ok(()) } } @@ -134,7 +140,8 @@ where mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::assert_ok; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_nonce_works() { @@ -152,22 +159,33 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert_noop!( - CheckNonce::(0).validate(&1, CALL, &info, len), - InvalidTransaction::Stale + assert_eq!( + CheckNonce::(0) + .validate_only(Some(1).into(), CALL, &info, len,) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) ); - assert_noop!( - CheckNonce::(0).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Stale + assert_eq!( + CheckNonce::(0) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + InvalidTransaction::Stale.into() ); // correct - assert_ok!(CheckNonce::(1).validate(&1, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(1).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(1).into(), + CALL, + &info, + len + )); // future - assert_ok!(CheckNonce::(5).validate(&1, CALL, &info, len)); - assert_noop!( - CheckNonce::(5).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Future + assert_ok!(CheckNonce::(5).validate_only(Some(1).into(), CALL, &info, len)); + assert_eq!( + CheckNonce::(5) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + InvalidTransaction::Future.into() ); }) } @@ -198,20 +216,44 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // Both providers and sufficients zero - assert_noop!( - CheckNonce::(1).validate(&1, CALL, &info, len), - InvalidTransaction::Payment + assert_eq!( + CheckNonce::(1) + .validate_only(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) ); - assert_noop!( - CheckNonce::(1).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Payment + assert_eq!( + CheckNonce::(1) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) ); // Non-zero providers - assert_ok!(CheckNonce::(1).validate(&2, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&2, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(2).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(2).into(), + CALL, + &info, + len + )); // Non-zero sufficients - assert_ok!(CheckNonce::(1).validate(&3, CALL, &info, len)); - assert_ok!(CheckNonce::(1).pre_dispatch(&3, CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_only(Some(3).into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare( + Some(3).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_check_nonce_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + assert_ok!(CheckNonce::(1).validate_only(None.into(), CALL, &info, len)); + assert_ok!(CheckNonce::(1).validate_and_prepare(None.into(), CALL, &info, len)); }) } } diff 
--git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index 24d5ef9cafb..3109708f48e 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs +++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -19,7 +19,8 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{transaction_extension::TransactionExtensionBase, TransactionExtension}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,26 @@ impl sp_std::fmt::Debug for CheckSpecVersion { } impl CheckSpecVersion { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtensionBase for CheckSpecVersion { const IDENTIFIER: &'static str = "CheckSpecVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().spec_version) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_spec_version() } } +impl TransactionExtension<::RuntimeCall, Context> + for CheckSpecVersion +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 3f9d6a1903f..ee1d507e7bc 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -19,7 +19,8 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{transaction_extension::TransactionExtensionBase, TransactionExtension}, transaction_validity::TransactionValidityError, }; @@ -46,29 +47,26 @@ impl sp_std::fmt::Debug for CheckTxVersion { } impl CheckTxVersion { - /// Create new `SignedExtension` to check transaction version. + /// Create new `TransactionExtension` to check transaction version. 
pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtensionBase for CheckTxVersion { const IDENTIFIER: &'static str = "CheckTxVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().transaction_version) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self) -> sp_weights::Weight { + ::check_tx_version() } } +impl TransactionExtension<::RuntimeCall, Context> + for CheckTxVersion +{ + type Val = (); + type Pre = (); + impl_tx_ext_default!(::RuntimeCall; Context; validate prepare); +} diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 70d1e756332..5c35acfb3f3 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -23,9 +23,12 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, - DispatchResult, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, ValidateResult, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError}, + DispatchResult, ValidTransaction, }; use sp_weights::Weight; @@ -100,39 +103,43 @@ where } } - /// Creates new `SignedExtension` to check weight of the extrinsic. + /// Creates new `TransactionExtension` to check weight of the extrinsic. pub fn new() -> Self { Self(Default::default()) } - /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. + /// Do the validate checks. This can be applied to both signed and unsigned. /// - /// It checks and notes the new weight and length. - pub fn do_pre_dispatch( + /// It only checks that the block weight and length limit will not exceed. + /// + /// Returns the transaction validity and the next block length, to be used in `prepare`. + pub fn do_validate( info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { + ) -> Result<(ValidTransaction, u32), TransactionValidityError> { + // ignore the next length. If they return `Ok`, then it is below the limit. let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_block_weight(info)?; + // during validation we skip block limit check. Since the `validate_transaction` + // call runs on an empty block anyway, by this we prevent `on_initialize` weight + // consumption from causing false negatives. Self::check_extrinsic_weight(info)?; - crate::AllExtrinsicsLen::::put(next_len); - crate::BlockWeight::::put(next_weight); - Ok(()) + Ok((Default::default(), next_len)) } - /// Do the validate checks. This can be applied to both signed and unsigned. + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. /// - /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { - // ignore the next length. If they return `Ok`, then it is below the limit. 
- let _ = Self::check_block_length(info, len)?; - // during validation we skip block limit check. Since the `validate_transaction` - // call runs on an empty block anyway, by this we prevent `on_initialize` weight - // consumption from causing false negatives. - Self::check_extrinsic_weight(info)?; + /// It checks and notes the new weight and length. + pub fn do_prepare( + info: &DispatchInfoOf, + next_len: u32, + ) -> Result<(), TransactionValidityError> { + let next_weight = Self::check_block_weight(info)?; + // Extrinsic weight already checked in `validate`. - Ok(Default::default()) + crate::AllExtrinsicsLen::::put(next_len); + crate::BlockWeight::::put(next_weight); + Ok(()) } } @@ -201,62 +208,55 @@ where Ok(all_weight) } -impl SignedExtension for CheckWeight +impl TransactionExtensionBase for CheckWeight { + const IDENTIFIER: &'static str = "CheckWeight"; + type Implicit = (); + + fn weight(&self) -> Weight { + ::check_weight() + } +} +impl TransactionExtension + for CheckWeight where T::RuntimeCall: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); type Pre = (); - const IDENTIFIER: &'static str = "CheckWeight"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) - } + type Val = u32; /* next block length */ fn validate( &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) - } - - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + origin: T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let (validity, next_len) = Self::do_validate(info, len)?; + Ok((validity, next_len, origin)) } - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, + ) -> Result { + Self::do_prepare(info, val) } fn post_dispatch( - _pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { let unspent = post_info.calc_unspent(info); if unspent.any_gt(Weight::zero()) { @@ -301,6 +301,7 @@ mod tests { AllExtrinsicsLen, BlockWeight, DispatchClass, }; use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; + use sp_runtime::traits::DispatchTransaction; use sp_std::marker::PhantomData; fn block_weights() -> crate::limits::BlockWeights { @@ -338,7 +339,8 @@ mod tests { } check(|max, len| { - assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); + let next_len = CheckWeight::::check_block_length(max, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(max, next_len)); assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total().ref_time() > 
block_weight_limit().ref_time()); }); @@ -419,9 +421,11 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. @@ -443,10 +447,12 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, next_len)); // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_parts(266, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); @@ -469,16 +475,19 @@ mod tests { }; let len = 0_usize; + let next_len = CheckWeight::::check_block_length(&dispatch_normal, len).unwrap(); assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + CheckWeight::::do_prepare(&dispatch_normal, next_len), InvalidTransaction::ExhaustsResources ); + let next_len = + CheckWeight::::check_block_length(&dispatch_operational, len).unwrap(); // Thank goodness we can still do an operational transaction to possibly save the // blockchain. - assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); + assert_ok!(CheckWeight::::do_prepare(&dispatch_operational, next_len)); // Not too much though assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + CheckWeight::::do_prepare(&dispatch_operational, next_len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -503,21 +512,35 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); // will fit. - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); // likewise for length limit. 
let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); }) } @@ -528,7 +551,12 @@ mod tests { let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + tx, + s, + ); if f { assert!(r.is_err()) } else { @@ -571,7 +599,12 @@ mod tests { BlockWeight::::mutate(|current_weight| { current_weight.set(s, DispatchClass::Normal) }); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + i, + len, + ); if f { assert!(r.is_err()) } else { @@ -604,18 +637,22 @@ mod tests { .set(Weight::from_parts(256, 0) - base_extrinsic, DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), info.weight + Weight::from_parts(256, 0) ); assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + pre, &info, &post_info, len, - &Ok(()) + &Ok(()), + &() )); assert_eq!( BlockWeight::::get().total(), @@ -639,7 +676,10 @@ mod tests { current_weight.set(Weight::from_parts(128, 0), DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), info.weight + @@ -648,11 +688,12 @@ mod tests { ); assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + pre, &info, &post_info, len, - &Ok(()) + &Ok(()), + &() )); assert_eq!( BlockWeight::::get().total(), @@ -672,7 +713,12 @@ mod tests { // Initial weight from `weights.base_block` assert_eq!(System::block_weight().total(), weights.base_block); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &free, + len + )); assert_eq!( System::block_weight().total(), weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block @@ -696,9 +742,11 @@ mod tests { let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + let next_len = CheckWeight::::check_block_length(&mandatory, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&mandatory, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_parts(1024 + 768, 0)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); 
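On the calling side, the unit tests above no longer invoke `validate` and `pre_dispatch` directly; they go through the `DispatchTransaction` helper trait. The following is an illustrative sketch only (not part of the patch), written as if it sat next to the `frame_system` extension tests above; it assumes the mock items (`Test`, `CALL`, `new_test_ext`) and the helper signatures used in those hunks.

// Illustrative sketch (not part of the patch), assuming the mock/test items used
// in the frame_system extension test modules above.
use frame_support::{assert_ok, dispatch::DispatchInfo};
use sp_runtime::traits::DispatchTransaction;
use sp_std::marker::PhantomData;

#[test]
fn transaction_extension_helpers_sketch() {
	new_test_ext().execute_with(|| {
		let info = DispatchInfo::default();
		let len = 0_usize;

		// `validate_only` runs just the `validate` step, replacing direct calls to
		// the old `SignedExtension::validate`, with the origin passed in explicitly.
		assert_ok!(CheckNonZeroSender::<Test>::new()
			.validate_only(Some(1).into(), CALL, &info, len));
		// A `None` origin models a transaction without a signed origin; the
		// extension skips its signed-only check instead of rejecting it up front.
		assert_ok!(CheckNonZeroSender::<Test>::new()
			.validate_only(None.into(), CALL, &info, len));

		// `validate_and_prepare` chains `validate` and `prepare`, replacing the old
		// `pre_dispatch` calls in the tests above.
		assert_ok!(CheckWeight::<Test>(PhantomData)
			.validate_and_prepare(Some(1).into(), CALL, &info, len));
	});
}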
diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs index a88c9fbf96e..d79104d2240 100644 --- a/substrate/frame/system/src/extensions/mod.rs +++ b/substrate/frame/system/src/extensions/mod.rs @@ -22,3 +22,6 @@ pub mod check_nonce; pub mod check_spec_version; pub mod check_tx_version; pub mod check_weight; +pub mod weights; + +pub use weights::WeightInfo; diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs new file mode 100644 index 00000000000..0d2fcb15ee6 --- /dev/null +++ b/substrate/frame/system/src/extensions/weights.rs @@ -0,0 +1,196 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/system/src/extensions/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `frame_system_extensions`. +pub trait WeightInfo { + fn check_genesis() -> Weight; + fn check_mortality() -> Weight; + fn check_non_zero_sender() -> Weight; + fn check_nonce() -> Weight; + fn check_spec_version() -> Weight; + fn check_tx_version() -> Weight; + fn check_weight() -> Weight; +} + +/// Weights for `frame_system_extensions` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. 
+ Weight::from_parts(4_160_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_000_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. 
+ Weight::from_parts(6_000_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 3b659916379..21393cb3e20 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -166,7 +166,7 @@ pub use extensions::{ check_genesis::CheckGenesis, check_mortality::CheckMortality, check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, - check_weight::CheckWeight, + check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo, }; // Backward compatible re-export. pub use extensions::check_mortality::CheckMortality as CheckEra; @@ -284,6 +284,7 @@ pub mod pallet { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type Version = (); type BlockWeights = (); @@ -356,6 +357,9 @@ pub mod pallet { /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = (); + /// This is used as an identifier of the chain. type SS58Prefix = (); @@ -563,8 +567,12 @@ pub mod pallet { /// All resources should be cleaned up associated with the given account. type OnKilledAccount: OnKilledAccount; + /// Weight information for the extrinsics of this pallet. type SystemWeightInfo: WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo: extensions::WeightInfo; + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. Reason is diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index a019cfd666e..ee73e33fe31 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -79,6 +79,9 @@ pub struct SubmitTransaction, Overarchi _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)>, } +// TODO [#2415]: Avoid splitting call and the totally opaque `signature`; `CreateTransaction` trait +// should provide something which impls `Encode`, which can be sent onwards to +// `sp_io::offchain::submit_transaction`. There's no great need to split things up as in here. impl SubmitTransaction where T: SendTransactionTypes, @@ -88,6 +91,8 @@ where call: >::OverarchingCall, signature: Option<::SignaturePayload>, ) -> Result<(), ()> { + // TODO: Use regular transaction API instead. 
+ #[allow(deprecated)] let xt = T::Extrinsic::new(call, signature).ok_or(())?; sp_io::offchain::submit_transaction(xt.encode()) } @@ -471,7 +476,7 @@ pub trait SendTransactionTypes { /// /// This trait is meant to be implemented by the runtime and is responsible for constructing /// a payload to be signed and contained within the extrinsic. -/// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). +/// This will most likely include creation of `TxExtension` (a tuple of `TransactionExtension`s). /// Note that the result can be altered by inspecting the `Call` (for instance adjusting /// fees, or mortality depending on the `pallet` being called). pub trait CreateSignedTransaction: @@ -621,14 +626,17 @@ mod tests { use crate::mock::{RuntimeCall, Test as TestRuntime, CALL}; use codec::Decode; use sp_core::offchain::{testing, TransactionPoolExt}; - use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{TestSignature, UintAuthorityId}, + }; impl SigningTypes for TestRuntime { type Public = UintAuthorityId; type Signature = TestSignature; } - type Extrinsic = TestXt; + type Extrinsic = UncheckedExtrinsic; impl SendTransactionTypes for TestRuntime { type Extrinsic = Extrinsic; @@ -693,7 +701,7 @@ mod tests { let _tx3 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -724,7 +732,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -758,7 +766,7 @@ mod tests { let _tx2 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -790,7 +798,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } } diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index 41807dea1c5..bd64bbf3763 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for frame_system +//! Autogenerated weights for `frame_system` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-s7kdgajz-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=frame-system -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/system/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/system/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for frame_system. +/// Weight functions needed for `frame_system`. pub trait WeightInfo { fn remark(b: u32, ) -> Weight; fn remark_with_event(b: u32, ) -> Weight; @@ -61,7 +62,7 @@ pub trait WeightInfo { fn apply_authorized_upgrade() -> Weight; } -/// Weights for frame_system using the Substrate node and recommended hardware. +/// Weights for `frame_system` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. @@ -69,84 +70,86 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_004_000 picoseconds. - Weight::from_parts(2_119_000, 0) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_976_430, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_032_000 picoseconds. - Weight::from_parts(8_097_000, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) + // Minimum execution time: 5_690_000 picoseconds. + Weight::from_parts(15_071_416, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_387, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_446_000 picoseconds. - Weight::from_parts(4_782_000, 1485) + // Minimum execution time: 3_822_000 picoseconds. 
+ Weight::from_parts(4_099_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 84_000_503_000 picoseconds. - Weight::from_parts(87_586_619_000, 1485) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 81_512_045_000 picoseconds. + Weight::from_parts(82_321_281_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_086_000 picoseconds. - Weight::from_parts(2_175_000, 0) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) + // Minimum execution time: 2_074_000 picoseconds. + Weight::from_parts(2_137_000, 0) + // Standard Error: 879 + .saturating_add(Weight::from_parts(797_224, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_255_000, 0) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) + // Minimum execution time: 2_122_000 picoseconds. + Weight::from_parts(2_208_000, 0) + // Standard Error: 855 + .saturating_add(Weight::from_parts(594_034, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `115 + p * (69 Β±0)` - // Estimated: `128 + p * (70 Β±0)` - // Minimum execution time: 4_189_000 picoseconds. 
- Weight::from_parts(4_270_000, 128) - // Standard Error: 2_296 - .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) + // Measured: `129 + p * (69 Β±0)` + // Estimated: `135 + p * (70 Β±0)` + // Minimum execution time: 3_992_000 picoseconds. + Weight::from_parts(4_170_000, 135) + // Standard Error: 1_377 + .saturating_add(Weight::from_parts(1_267_892, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -157,114 +160,116 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Minimum execution time: 8_872_000 picoseconds. + Weight::from_parts(9_513_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `22` - // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `164` + // Estimated: `67035` + // Minimum execution time: 85_037_546_000 picoseconds. + Weight::from_parts(85_819_414_000, 67035) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. fn remark(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_004_000 picoseconds. - Weight::from_parts(2_119_000, 0) + // Minimum execution time: 2_130_000 picoseconds. + Weight::from_parts(2_976_430, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_032_000 picoseconds. - Weight::from_parts(8_097_000, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_455, 0).saturating_mul(b.into())) + // Minimum execution time: 5_690_000 picoseconds. 
+ Weight::from_parts(15_071_416, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_387, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_446_000 picoseconds. - Weight::from_parts(4_782_000, 1485) + // Minimum execution time: 3_822_000 picoseconds. + Weight::from_parts(4_099_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 84_000_503_000 picoseconds. - Weight::from_parts(87_586_619_000, 1485) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 81_512_045_000 picoseconds. + Weight::from_parts(82_321_281_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_086_000 picoseconds. - Weight::from_parts(2_175_000, 0) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(841_511, 0).saturating_mul(i.into())) + // Minimum execution time: 2_074_000 picoseconds. + Weight::from_parts(2_137_000, 0) + // Standard Error: 879 + .saturating_add(Weight::from_parts(797_224, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_000_000 picoseconds. 
- Weight::from_parts(2_255_000, 0) - // Standard Error: 1_425 - .saturating_add(Weight::from_parts(662_473, 0).saturating_mul(i.into())) + // Minimum execution time: 2_122_000 picoseconds. + Weight::from_parts(2_208_000, 0) + // Standard Error: 855 + .saturating_add(Weight::from_parts(594_034, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `115 + p * (69 Β±0)` - // Estimated: `128 + p * (70 Β±0)` - // Minimum execution time: 4_189_000 picoseconds. - Weight::from_parts(4_270_000, 128) - // Standard Error: 2_296 - .saturating_add(Weight::from_parts(1_389_650, 0).saturating_mul(p.into())) + // Measured: `129 + p * (69 Β±0)` + // Estimated: `135 + p * (70 Β±0)` + // Minimum execution time: 3_992_000 picoseconds. + Weight::from_parts(4_170_000, 135) + // Standard Error: 1_377 + .saturating_add(Weight::from_parts(1_267_892, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -275,25 +280,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Minimum execution time: 8_872_000 picoseconds. + Weight::from_parts(9_513_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `22` - // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) - .saturating_add(Weight::from_parts(0, 1518)) - .saturating_add(RocksDbWeight::get().reads(2)) - .saturating_add(RocksDbWeight::get().writes(3)) + // Measured: `164` + // Estimated: `67035` + // Minimum execution time: 85_037_546_000 picoseconds. + Weight::from_parts(85_819_414_000, 67035) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/substrate/frame/timestamp/src/weights.rs b/substrate/frame/timestamp/src/weights.rs index 46c54473486..2df68ba0f1e 100644 --- a/substrate/frame/timestamp/src/weights.rs +++ b/substrate/frame/timestamp/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! 
Autogenerated weights for pallet_timestamp +//! Autogenerated weights for `pallet_timestamp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/timestamp/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/timestamp/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,57 +49,57 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_timestamp. +/// Weight functions needed for `pallet_timestamp`. pub trait WeightInfo { fn set() -> Weight; fn on_finalize() -> Weight; } -/// Weights for pallet_timestamp using the Substrate node and recommended hardware. +/// Weights for `pallet_timestamp` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `345` // Estimated: `1493` - // Minimum execution time: 9_857_000 picoseconds. - Weight::from_parts(10_492_000, 1493) + // Minimum execution time: 8_417_000 picoseconds. + Weight::from_parts(8_932_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_175_000 picoseconds. - Weight::from_parts(4_334_000, 0) + // Minimum execution time: 3_911_000 picoseconds. + Weight::from_parts(4_092_000, 0) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `312` + // Measured: `345` // Estimated: `1493` - // Minimum execution time: 9_857_000 picoseconds. - Weight::from_parts(10_492_000, 1493) + // Minimum execution time: 8_417_000 picoseconds. + Weight::from_parts(8_932_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `161` + // Measured: `194` // Estimated: `0` - // Minimum execution time: 4_175_000 picoseconds. - Weight::from_parts(4_334_000, 0) + // Minimum execution time: 3_911_000 picoseconds. + Weight::from_parts(4_092_000, 0) } } diff --git a/substrate/frame/tips/src/weights.rs b/substrate/frame/tips/src/weights.rs index ec622866715..dbe1ed246ff 100644 --- a/substrate/frame/tips/src/weights.rs +++ b/substrate/frame/tips/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_tips +//! Autogenerated weights for `pallet_tips` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/tips/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/tips/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_tips. +/// Weight functions needed for `pallet_tips`. 
pub trait WeightInfo { fn report_awesome(r: u32, ) -> Weight; fn retract_tip() -> Weight; @@ -60,220 +59,220 @@ pub trait WeightInfo { fn slash_tip(t: u32, ) -> Weight; } -/// Weights for pallet_tips using the Substrate node and recommended hardware. +/// Weights for `pallet_tips` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 29_576_000 picoseconds. - Weight::from_parts(30_722_650, 3469) - // Standard Error: 192 - .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) + // Minimum execution time: 25_145_000 picoseconds. + Weight::from_parts(26_085_800, 3469) + // Standard Error: 216 + .saturating_add(Weight::from_parts(5_121, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) fn retract_tip() -> Weight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 28_522_000 picoseconds. - Weight::from_parts(29_323_000, 3686) + // Minimum execution time: 25_302_000 picoseconds. + Weight::from_parts(26_229_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:0 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `526 + t * (64 Β±0)` // Estimated: `3991 + t * (64 Β±0)` - // Minimum execution time: 19_650_000 picoseconds. 
- Weight::from_parts(19_837_982, 3991) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) - // Standard Error: 3_588 - .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) + // Minimum execution time: 16_600_000 picoseconds. + Weight::from_parts(17_071_638, 3991) + // Standard Error: 131 + .saturating_add(Weight::from_parts(1_138, 0).saturating_mul(r.into())) + // Standard Error: 3_125 + .saturating_add(Weight::from_parts(82_996, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `747 + t * (112 Β±0)` // Estimated: `4212 + t * (112 Β±0)` - // Minimum execution time: 15_641_000 picoseconds. - Weight::from_parts(15_745_460, 4212) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) + // Minimum execution time: 13_972_000 picoseconds. + Weight::from_parts(14_269_356, 4212) + // Standard Error: 4_695 + .saturating_add(Weight::from_parts(205_433, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `786 + t * (112 Β±0)` // Estimated: `4242 + t * (112 Β±0)` - // Minimum execution time: 62_059_000 picoseconds. - Weight::from_parts(64_604_554, 4242) - // Standard Error: 11_818 - .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) + // Minimum execution time: 51_878_000 picoseconds. 
+ Weight::from_parts(54_513_477, 4242) + // Standard Error: 12_281 + .saturating_add(Weight::from_parts(126_605, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 14_133_000 picoseconds. - Weight::from_parts(14_957_547, 3734) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) + // Minimum execution time: 11_800_000 picoseconds. + Weight::from_parts(12_520_984, 3734) + // Standard Error: 2_360 + .saturating_add(Weight::from_parts(28_777, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 29_576_000 picoseconds. - Weight::from_parts(30_722_650, 3469) - // Standard Error: 192 - .saturating_add(Weight::from_parts(2_601, 0).saturating_mul(r.into())) + // Minimum execution time: 25_145_000 picoseconds. + Weight::from_parts(26_085_800, 3469) + // Standard Error: 216 + .saturating_add(Weight::from_parts(5_121, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) fn retract_tip() -> Weight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 28_522_000 picoseconds. - Weight::from_parts(29_323_000, 3686) + // Minimum execution time: 25_302_000 picoseconds. 
+ Weight::from_parts(26_229_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:1 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:0 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 300]`. /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `526 + t * (64 Β±0)` // Estimated: `3991 + t * (64 Β±0)` - // Minimum execution time: 19_650_000 picoseconds. - Weight::from_parts(19_837_982, 3991) - // Standard Error: 151 - .saturating_add(Weight::from_parts(1_746, 0).saturating_mul(r.into())) - // Standard Error: 3_588 - .saturating_add(Weight::from_parts(102_359, 0).saturating_mul(t.into())) + // Minimum execution time: 16_600_000 picoseconds. + Weight::from_parts(17_071_638, 3991) + // Standard Error: 131 + .saturating_add(Weight::from_parts(1_138, 0).saturating_mul(r.into())) + // Standard Error: 3_125 + .saturating_add(Weight::from_parts(82_996, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) } - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `747 + t * (112 Β±0)` // Estimated: `4212 + t * (112 Β±0)` - // Minimum execution time: 15_641_000 picoseconds. - Weight::from_parts(15_745_460, 4212) - // Standard Error: 5_106 - .saturating_add(Weight::from_parts(229_475, 0).saturating_mul(t.into())) + // Minimum execution time: 13_972_000 picoseconds. 
+ Weight::from_parts(14_269_356, 4212) + // Standard Error: 4_695 + .saturating_add(Weight::from_parts(205_433, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Elections Members (r:1 w:0) - /// Proof Skipped: Elections Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Elections::Members` (r:1 w:0) + /// Proof: `Elections::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `786 + t * (112 Β±0)` // Estimated: `4242 + t * (112 Β±0)` - // Minimum execution time: 62_059_000 picoseconds. - Weight::from_parts(64_604_554, 4242) - // Standard Error: 11_818 - .saturating_add(Weight::from_parts(116_297, 0).saturating_mul(t.into())) + // Minimum execution time: 51_878_000 picoseconds. + Weight::from_parts(54_513_477, 4242) + // Standard Error: 12_281 + .saturating_add(Weight::from_parts(126_605, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) + /// Storage: `Tips::Tips` (r:1 w:1) + /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Tips::Reasons` (r:0 w:1) + /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 14_133_000 picoseconds. - Weight::from_parts(14_957_547, 3734) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(22_138, 0).saturating_mul(t.into())) + // Minimum execution time: 11_800_000 picoseconds. 
+ Weight::from_parts(12_520_984, 3734) + // Standard Error: 2_360 + .saturating_add(Weight::from_parts(28_777, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 275e1c01f92..66e0bef1436 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } @@ -36,6 +37,7 @@ pallet-balances = { path = "../balances" } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-balances/std", @@ -46,6 +48,13 @@ std = [ "sp-runtime/std", "sp-std/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 7640cc815e5..528f29e8e4a 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -19,6 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] # Substrate dependencies sp-runtime = { path = "../../../primitives/runtime", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } @@ -37,6 +38,7 @@ pallet-balances = { path = "../../balances" } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-asset-conversion/std", @@ -50,6 +52,16 @@ std = [ "sp-std/std", "sp-storage/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md index eccba773673..fcd1527526e 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md +++ 
b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs new file mode 100644 index 00000000000..0bffb991e4a --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs @@ -0,0 +1,125 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Asset Conversion Tx Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable}; + +#[benchmarks(where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + + Sync + + From + + Into> + + Into> + + From>, + ChargeAssetIdOf: Send + Sync + Default, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = whitelisted_caller(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u64.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u64.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| 
Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + + let tip = 10u64.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index ed0ed56e6e0..8ef7c2ba38e 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -20,8 +20,8 @@ //! //! ## Overview //! -//! This pallet provides a `SignedExtension` with an optional `AssetId` that specifies the asset -//! to be used for payment (defaulting to the native token on `None`). It expects an +//! This pallet provides a `TransactionExtension` with an optional `AssetId` that specifies the +//! asset to be used for payment (defaulting to the native token on `None`). It expects an //! [`OnChargeAssetTransaction`] implementation analogous to [`pallet-transaction-payment`]. The //! included [`AssetConversionAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the //! fee amount by converting the fee calculated by [`pallet-transaction-payment`] in the native @@ -31,7 +31,7 @@ //! //! This pallet does not have any dispatchable calls or storage. It wraps FRAME's Transaction //! Payment pallet and functions as a replacement. This means you should include both pallets in -//! your `construct_runtime` macro, but only include this pallet's [`SignedExtension`] +//! your `construct_runtime` macro, but only include this pallet's [`TransactionExtension`] //! ([`ChargeAssetTxPayment`]). //! //! 
## Terminology @@ -53,23 +53,29 @@ use frame_support::{ }, DefaultNoBound, }; -use pallet_transaction_payment::OnChargeTransaction; +use pallet_transaction_payment::{ChargeTransactionPayment, OnChargeTransaction}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, ValidateResult, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; mod payment; -use frame_support::traits::tokens::AssetId; +use frame_support::{pallet_prelude::Weight, traits::tokens::AssetId}; pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -125,11 +131,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. 
+ fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -179,9 +204,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -225,9 +249,8 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +impl TransactionExtensionBase for ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync @@ -238,109 +261,145 @@ where ChargeAssetIdOf: Send + Sync, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + + fn weight(&self) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } +} + +impl TransactionExtension for ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + + Sync + + From + + Into> + + Into> + + From>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // transaction fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, // who paid the fee - Self::AccountId, + T::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, // asset_id for the transaction payment Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let validity = ValidTransaction { priority, ..Default::default() }; + let val = (self.tip, who.clone(), fee); + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + let (tip, who, fee) = val; + // Mutating call of `withdraw_fee` to actually charge for the transaction. 
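+ // `fee` was computed once, non-mutatingly, in `validate` and handed over through
+ // `Self::Val` as `(tip, who, fee)`, so it does not have to be recomputed from the
+ // encoded length here.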
+ let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, initial_payment, self.asset_id.clone())) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - debug_assert!( - asset_id.is_none(), - "For that payment type the `asset_id` should be None" - ); - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + let (tip, who, initial_payment, asset_id) = pre; + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + debug_assert!( + asset_id.is_none(), + "For that payment type the `asset_id` should be None" + ); + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( + (tip, who, already_withdrawn), + info, + post_info, + len, + result, + &(), + )?; + }, + InitialPayment::Asset(already_withdrawn) => { + debug_assert!( + asset_id.is_some(), + "For that payment type the `asset_id` should be set" + ); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, info, post_info, tip, + ); + + if let Some(asset_id) = asset_id { + let (used_for_fee, received_exchanged, asset_consumed) = already_withdrawn; + let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, post_info, - len, - result, + actual_fee.into(), + tip.into(), + used_for_fee.into(), + received_exchanged.into(), + asset_id.clone(), + asset_consumed.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - debug_assert!( - asset_id.is_some(), - "For that payment type the `asset_id` should be set" - ); - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - if let Some(asset_id) = asset_id { - let (used_for_fee, received_exchanged, asset_consumed) = already_withdrawn; - let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - used_for_fee.into(), - received_exchanged.into(), - asset_id.clone(), - asset_consumed.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip, - asset_id, - }); - } - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip, + asset_id, + }); + } + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. 
+ debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + }, } Ok(()) diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index c8bf2eb8f44..853753d41cf 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -168,12 +168,12 @@ impl OnUnbalanced> for DealWithFees } } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; } @@ -269,4 +269,72 @@ impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = AssetConversionAdapter; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id, id) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = 12; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), lp_provider, u64::MAX / 2)); + let lp_provider_account = ::Lookup::unlookup(lp_provider); + assert_ok!(Assets::mint_into(asset_id.into(), &lp_provider_account, u64::MAX / 2)); + + let token_1 = Box::new(NativeOrWithId::Native); + let token_2 = Box::new(NativeOrWithId::WithId(asset_id)); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider), + token_1.clone(), + token_2.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider), + token_1, + token_2, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider_account, + )); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 62faed269d3..619077c0a5e 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -28,7 +28,10 @@ use frame_support::{ use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as 
BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -160,33 +163,35 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, None) + .validate_and_prepare(Some(1).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(WEIGHT_100), len) + let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) + .validate_and_prepare(Some(2).into(), CALL, &info_from_weight(WEIGHT_100), len) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_100), &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); }); @@ -237,8 +242,8 @@ fn transaction_payment_in_asset_possible() { let fee_in_asset = input_quote.unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -247,11 +252,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), // estimated tx weight &default_post_info(), // weight actually used == estimated len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -289,12 +295,8 @@ fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { assert_eq!(Assets::balance(asset_id, caller), balance); let len = 10; - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)).pre_dispatch( - &caller, - CALL, - &info_from_weight(WEIGHT_5), - len, - ); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len); // As there is no pool in the dex set up for this asset, conversion should fail. 
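+ // With the `TransactionExtension` API the error now surfaces from `validate_and_prepare`
+ // rather than from the old `pre_dispatch` call.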
assert!(pre.is_err()); @@ -344,8 +346,8 @@ fn transaction_payment_without_fee() { assert_eq!(input_quote, Some(201)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used @@ -363,11 +365,12 @@ fn transaction_payment_without_fee() { assert_eq!(refund, 199); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); // caller should get refunded @@ -419,8 +422,8 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_eq!(input_quote, Some(1206)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_100), len) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -435,11 +438,12 @@ fn asset_transaction_payment_with_tip_and_refund() { .unwrap(); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_100), &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(TipUnbalancedAmount::get(), tip); @@ -500,8 +504,8 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_eq!(fee_in_asset, 301); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); assert_eq!(Balances::free_balance(caller), ed); // check that fee was charged in the given asset @@ -516,11 +520,12 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset + refund); assert_eq!(Balances::free_balance(caller), 0); @@ -565,18 +570,19 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // there will be no conversion when the fee is zero { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies there are no fees assert_eq!(Assets::balance(asset_id, caller), balance); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); } @@ -591,17 +597,23 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { ) .unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + 
&info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); }); @@ -641,8 +653,8 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees @@ -658,62 +670,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 1000; - - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); }); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs new file mode 100644 index 00000000000..f95e49f8073 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_conversion_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. 
+ Weight::from_parts(35_263_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. + Weight::from_parts(113_992_000, 6208) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_263_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. 
+ Weight::from_parts(113_992_000, 6208) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 1da3237df08..06d9c30792e 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -66,6 +66,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/transaction-payment/asset-tx-payment/README.md b/substrate/frame/transaction-payment/asset-tx-payment/README.md index fc860347d85..933ce13b0ee 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs new file mode 100644 index 00000000000..17e96e2b025 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs @@ -0,0 +1,123 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Tx Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{AsSystemOriginSigner, DispatchTransaction, Dispatchable}; + +#[benchmarks(where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + Credit: IsType>, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = whitelisted_caller(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u32.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u32.into(), None); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = whitelisted_caller(); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool( + fun_asset_id.clone(), + caller.clone(), + ); + let tip = 10u32.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(10, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 753fae747a3..991b5f31406 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -31,7 +31,7 @@ //! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means //! 
you should include both pallets in your `construct_runtime` macro, but only include this -//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +//! pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). #![cfg_attr(not(feature = "std"), no_std)] @@ -40,6 +40,7 @@ use sp_std::prelude::*; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + pallet_prelude::Weight, traits::{ tokens::{ fungibles::{Balanced, Credit, Inspect}, @@ -52,10 +53,11 @@ use frame_support::{ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, TransactionExtensionBase, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] @@ -63,8 +65,14 @@ mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; +pub mod weights; + pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -120,11 +128,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. 
+ fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -172,9 +199,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -209,104 +235,139 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +impl TransactionExtensionBase for ChargeAssetTxPayment where - T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync + From + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + + fn weight(&self) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } +} + +impl TransactionExtension for ChargeAssetTxPayment +where + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + Credit: IsType>, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // transaction fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, // who paid the fee - Self::AccountId, + T::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, // asset_id for the transaction payment Option>, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. 
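+ // The same fee value is returned as part of `Self::Val` so that `prepare` can withdraw it
+ // without computing it a second time.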
+ let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let val = (self.tip, who.clone(), fee); + let validity = ValidTransaction { priority, ..Default::default() }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + let (tip, who, fee) = val; + // Mutating call of `withdraw_fee` to actually charge for the transaction. + let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, initial_payment, self.asset_id)) } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + let (tip, who, initial_payment, asset_id) = pre; + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( + (tip, who, already_withdrawn), + info, + post_info, + len, + result, + &(), + )?; + }, + InitialPayment::Asset(already_withdrawn) => { + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, info, post_info, tip, + ); + + let (converted_fee, converted_tip) = + T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, post_info, - len, - result, + actual_fee.into(), + tip.into(), + already_withdrawn.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - let (converted_fee, converted_tip) = - T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - already_withdrawn.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip: converted_tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip: converted_tip, + asset_id, + }); + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. 
In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + }, } Ok(()) diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index 1f335b4f6c4..bb484795e82 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -136,12 +136,12 @@ impl WeightToFeeT for TransactionByteFee { } } +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; } @@ -205,4 +205,56 @@ impl Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id.into(), id.into()) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let min_balance = 1; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 8df98ceda99..42b8f2cf5fa 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -25,7 +25,10 @@ use frame_support::{ use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -116,33 +119,45 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, 
&info_from_weight(Weight::from_parts(5, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, None) + .validate_and_prepare( + Some(1).into(), + CALL, + &info_from_weight(Weight::from_parts(5, 0)), + len, + ) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(5, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) + .validate_and_prepare( + Some(2).into(), + CALL, + &info_from_weight(Weight::from_parts(100, 0)), + len, + ) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(100, 0)), &post_info_from_weight(Weight::from_parts(50, 0)), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); }); @@ -179,8 +194,13 @@ fn transaction_payment_in_asset_possible() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -189,11 +209,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded @@ -232,8 +253,13 @@ fn transaction_payment_without_fee() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -242,11 +268,12 @@ fn transaction_payment_without_fee() { assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); // caller should be refunded assert_eq!(Assets::balance(asset_id, caller), balance); @@ -287,18 
+314,24 @@ fn asset_transaction_payment_with_tip_and_refund() { // existential deposit let fee_with_tip = (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_weight(Weight::from_parts(final_weight, 0)), len, - &Ok(()) + &Ok(()), + &() )); let final_fee = fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); @@ -339,19 +372,25 @@ fn payment_from_account_with_only_assets() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Balances::free_balance(caller), 0); @@ -372,7 +411,12 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); // create the non-sufficient asset @@ -386,7 +430,12 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); }); } @@ -424,33 +473,40 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // naive fee calculation would round down to zero assert_eq!(fee, 0); { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` still implies no fees assert_eq!(Assets::balance(asset_id, caller), balance); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), + &() )); 
assert_eq!(Assets::balance(asset_id, caller), balance); } - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance - 1); }); @@ -488,8 +544,8 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); @@ -503,60 +559,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 100; - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), + &() )); assert_eq!(Assets::balance(asset_id, caller), balance); }); diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs new file mode 100644 index 00000000000..1af1c94177d --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. 
+ Weight::from_parts(34_716_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. + Weight::from_parts(45_297_000, 3675) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_716_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. 
+ Weight::from_parts(45_297_000, 3675) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 6c34c26ce92..9810ea08c79 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -21,7 +21,7 @@ //! //! ## Overview //! -//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! It does this by wrapping an existing [`TransactionExtension`] implementation (e.g. //! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the //! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom //! event is emitted instead. Otherwise, the extension is applied as usual. @@ -31,7 +31,8 @@ //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets //! in your `construct_runtime` macro and include this pallet's -//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. +//! [`TransactionExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an +//! argument. #![cfg_attr(not(feature = "std"), no_std)] @@ -42,7 +43,10 @@ use frame_support::{ }; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, + traits::{ + DispatchInfoOf, OriginOf, PostDispatchInfoOf, TransactionExtension, + TransactionExtensionBase, ValidateResult, + }, transaction_validity::TransactionValidityError, }; @@ -70,11 +74,11 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A transaction fee was skipped. - FeeSkipped { who: T::AccountId }, + FeeSkipped { origin: ::PalletsOrigin }, } } -/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +/// A [`TransactionExtension`] that skips the wrapped extension if the dispatchable is feeless. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); @@ -103,53 +107,96 @@ impl From for SkipCheckIfFeeless { } } -impl> SignedExtension +pub enum Intermediate { + /// The wrapped extension should be applied. + Apply(T), + /// The wrapped extension should be skipped. + Skip(O), +} +use Intermediate::*; + +impl TransactionExtensionBase for SkipCheckIfFeeless -where - S::Call: CheckIfFeeless>, { - type AccountId = T::AccountId; - type Call = S::Call; - type AdditionalSigned = S::AdditionalSigned; - type Pre = (Self::AccountId, Option<::Pre>); // From the outside this extension should be "invisible", because it just extends the wrapped // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward // the identifier of the wrapped extension to let wallets see this extension as it would only be // the wrapped extension itself. 
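+ // `Implicit` and `weight` are likewise forwarded to the wrapped extension `S` for the same reason.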
const IDENTIFIER: &'static str = S::IDENTIFIER; + type Implicit = S::Implicit; + + fn implicit(&self) -> Result { + self.0.implicit() + } - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn weight(&self) -> frame_support::weights::Weight { + self.0.weight() } +} - fn pre_dispatch( +impl> + TransactionExtension for SkipCheckIfFeeless +where + T::RuntimeCall: CheckIfFeeless>, +{ + type Val = Intermediate as OriginTrait>::PalletsOrigin>; + type Pre = Intermediate as OriginTrait>::PalletsOrigin>; + + fn validate( + &self, + origin: OriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: S::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult { + if call.is_feeless(&origin) { + Ok((Default::default(), Skip(origin.caller().clone()), origin)) + } else { + let (x, y, z) = self.0.validate( + origin, + call, + info, + len, + context, + self_implicit, + inherited_implication, + )?; + Ok((x, Apply(y), z)) + } + } + + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + val: Self::Val, + origin: &OriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, + context: &Context, ) -> Result { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok((who.clone(), None)) - } else { - Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + match val { + Apply(val) => self.0.prepare(val, origin, call, info, len, context).map(Apply), + Skip(origin) => Ok(Skip(origin)), } } fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, + context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some(pre) = pre { - if let Some(pre) = pre.1 { - S::post_dispatch(Some(pre), info, post_info, len, result)?; - } else { - Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); - } + match pre { + Apply(pre) => S::post_dispatch(pre, info, post_info, len, result, context), + Skip(origin) => { + Pallet::::deposit_event(Event::::FeeSkipped { origin }); + Ok(()) + }, } - Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index 17c4c773997..06948de4c3c 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -18,9 +18,12 @@ use crate as pallet_skip_feeless_payment; use frame_support::{derive_impl, parameter_types}; use frame_system as system; +use sp_runtime::{ + impl_tx_ext_default, + traits::{OriginOf, TransactionExtension}, +}; type Block = frame_system::mocking::MockBlock; -type AccountId = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { @@ -38,21 +41,22 @@ parameter_types! 
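The validate/prepare/post_dispatch flow above hinges on the `Intermediate` enum: the decision to apply or skip the wrapped extension is made once in `validate`, then carried through `prepare` into `post_dispatch` instead of being re-derived. Below is a self-contained sketch of that state-threading pattern with simplified stand-in types; it is not the real `TransactionExtension` API.

enum Intermediate<T, O> {
    // The wrapped extension applies; carry its own pipeline state.
    Apply(T),
    // The wrapped extension is skipped; remember whose fee was skipped.
    Skip(O),
}
use Intermediate::*;

struct Call {
    feeless: bool,
}
type Origin = u64;

// Stand-in for the wrapped fee extension's pre-dispatch data.
struct InnerPre {
    fee: u64,
}

fn validate(call: &Call, origin: Origin) -> Intermediate<u64, Origin> {
    if call.feeless {
        Skip(origin)
    } else {
        Apply(0 /* tip carried forward */)
    }
}

fn prepare(val: Intermediate<u64, Origin>) -> Intermediate<InnerPre, Origin> {
    match val {
        // Only the "apply" branch actually charges anything.
        Apply(tip) => Apply(InnerPre { fee: 100 + tip }),
        Skip(origin) => Skip(origin),
    }
}

fn post_dispatch(pre: Intermediate<InnerPre, Origin>) {
    match pre {
        Apply(inner) => println!("settled a fee of {}", inner.fee),
        Skip(origin) => println!("FeeSkipped-style event for origin {origin}"),
    }
}

fn main() {
    for feeless in [false, true] {
        post_dispatch(prepare(validate(&Call { feeless }, 42)));
    }
}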
{ #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] pub struct DummyExtension; -impl SignedExtension for DummyExtension { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtensionBase for DummyExtension { const IDENTIFIER: &'static str = "DummyExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( + type Implicit = (); +} +impl TransactionExtension for DummyExtension { + type Val = (); + type Pre = (); + impl_tx_ext_default!(RuntimeCall; C; validate); + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + _val: Self::Val, + _origin: &OriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, + _context: &C, ) -> Result { PreDispatchCount::mutate(|c| *c += 1); Ok(()) diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs index 4b4dd699741..96b4af7e203 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -16,18 +16,19 @@ use super::*; use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; use frame_support::dispatch::DispatchInfo; +use sp_runtime::traits::DispatchTransaction; #[test] fn skip_feeless_payment_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PreDispatchCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(PreDispatchCount::get(), 1); } diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs new file mode 100644 index 00000000000..d63c5a7d746 --- /dev/null +++ b/substrate/frame/transaction-payment/src/benchmarking.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Transaction Payment Pallet's transaction extension + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_system::{EventRecord, RawOrigin}; +use sp_runtime::traits::{DispatchTransaction, Dispatchable}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where + T: Config, + T::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + From, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_transaction_payment() { + let caller: T::AccountId = whitelisted_caller(); + >::endow_account( + &caller, + >::minimum_balance() * 1000.into(), + ); + let tip = >::minimum_balance(); + let ext: ChargeTransactionPayment = ChargeTransactionPayment::from(tip); + let inner = frame_system::Call::remark { remark: vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + weight: Weight::from_parts(100, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + + let actual_fee = Pallet::::compute_actual_fee(10, &info, &post_info, tip); + assert_last_event::( + Event::::TransactionFeePaid { who: caller, actual_fee, tip }.into(), + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index efadfd60bdd..6b17616bdc7 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -62,23 +62,28 @@ pub use payment::*; use sp_runtime::{ traits::{ Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion, - Saturating, SignedExtension, Zero, + Saturating, TransactionExtension, TransactionExtensionBase, Zero, }, transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionValidityError, ValidTransaction, }, FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; use sp_std::prelude::*; pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; +pub use weights::WeightInfo; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; mod types; +pub mod weights; /// Fee multiplier. pub type Multiplier = FixedU128; @@ -335,6 +340,7 @@ pub mod pallet { type RuntimeEvent = (); type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = (); + type WeightInfo = (); } } @@ -387,6 +393,9 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; } #[pallet::type_value] @@ -507,11 +516,11 @@ impl Pallet { // a very very little potential gain in the future. 
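The benchmark above finishes by asserting that the expected `TransactionFeePaid` event is the most recent one deposited. A minimal, dependency-free sketch of that `assert_last_event` pattern follows; the event and system types are simplified stand-ins for the FRAME equivalents.

#[derive(Debug, PartialEq)]
enum RuntimeEvent {
    TransactionFeePaid { who: u64, actual_fee: u128, tip: u128 },
    Other,
}

struct System {
    events: Vec<RuntimeEvent>,
}

// Compare the expected event against the last entry in the event buffer.
fn assert_last_event(system: &System, expected: RuntimeEvent) {
    let last = system.events.last().expect("at least one event was deposited");
    assert_eq!(last, &expected);
}

fn main() {
    let system = System {
        events: vec![
            RuntimeEvent::Other,
            RuntimeEvent::TransactionFeePaid { who: 1, actual_fee: 230, tip: 10 },
        ],
    };
    assert_last_event(
        &system,
        RuntimeEvent::TransactionFeePaid { who: 1, actual_fee: 230, tip: 10 },
    );
    println!("last event matched");
}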
let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee(len, &dispatch_info, 0u32.into()) - } else { - // Unsigned extrinsics have no partial fee. + let partial_fee = if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no partial fee. 0u32.into() + } else { + Self::compute_fee(len, &dispatch_info, 0u32.into()) }; let DispatchInfo { weight, class, .. } = dispatch_info; @@ -531,11 +540,11 @@ impl Pallet { let tip = 0u32.into(); - if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee_details(len, &dispatch_info, tip) - } else { - // Unsigned extrinsics have no inclusion fee. + if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no inclusion fee. FeeDetails { inclusion_fee: None, tip } + } else { + Self::compute_fee_details(len, &dispatch_info, tip) } } @@ -714,7 +723,7 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result< ( BalanceOf, @@ -723,7 +732,6 @@ where TransactionValidityError, > { let tip = self.0; - let fee = Pallet::::compute_fee(len as u32, info, tip); <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( who, call, info, fee, tip, @@ -731,6 +739,22 @@ where .map(|i| (fee, i)) } + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result, TransactionValidityError> { + let tip = self.0; + let fee = Pallet::::compute_fee(len as u32, info, tip); + + <::OnChargeTransaction as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, tip, + )?; + Ok(fee) + } + /// Get an appropriate priority for a transaction with the given `DispatchInfo`, encoded length /// and user-included tip. /// @@ -817,67 +841,93 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { } } -impl SignedExtension for ChargeTransactionPayment +impl TransactionExtensionBase for ChargeTransactionPayment { + const IDENTIFIER: &'static str = "ChargeTransactionPayment"; + type Implicit = (); + + fn weight(&self) -> Weight { + T::WeightInfo::charge_transaction_payment() + } +} + +impl TransactionExtension + for ChargeTransactionPayment where BalanceOf: Send + Sync + From, T::RuntimeCall: Dispatchable, { - const IDENTIFIER: &'static str = "ChargeTransactionPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Val = ( + // tip + BalanceOf, + // who paid the fee + T::AccountId, + // computed fee + BalanceOf, + ); type Pre = ( // tip BalanceOf, - // who paid the fee - this is an option to allow for a Default impl. 
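The split introduced above between `can_withdraw_fee` (used during validation) and `withdraw_fee` (used during `prepare`) is a check-then-charge pattern: the dry-run must not mutate balances, and the real withdrawal reuses the fee computed earlier. A self-contained sketch with simplified balances and errors (not the real OnChargeTransaction types):

use std::collections::HashMap;

type AccountId = u64;
type Balance = u128;

#[derive(Debug, PartialEq)]
enum TxError {
    Payment,
}

struct Ledger {
    free: HashMap<AccountId, Balance>,
}

impl Ledger {
    // Dry-run: verify the fee could be withdrawn, without changing anything.
    fn can_withdraw_fee(&self, who: &AccountId, fee: Balance) -> Result<(), TxError> {
        let balance = self.free.get(who).copied().unwrap_or(0);
        balance.checked_sub(fee).map(|_| ()).ok_or(TxError::Payment)
    }

    // Actual charge: re-check and then deduct the fee.
    fn withdraw_fee(&mut self, who: &AccountId, fee: Balance) -> Result<Balance, TxError> {
        self.can_withdraw_fee(who, fee)?;
        *self.free.get_mut(who).ok_or(TxError::Payment)? -= fee;
        Ok(fee)
    }
}

fn main() {
    let mut ledger = Ledger { free: HashMap::from([(1, 1_000)]) };

    // `validate`: dry-run only, no state change.
    assert!(ledger.can_withdraw_fee(&1, 150).is_ok());
    assert_eq!(ledger.free[&1], 1_000);

    // `prepare`: the fee is actually charged.
    assert_eq!(ledger.withdraw_fee(&1, 150), Ok(150));
    assert_eq!(ledger.free[&1], 850);

    // A fee larger than the balance is rejected in both phases.
    assert_eq!(ledger.can_withdraw_fee(&1, 10_000), Err(TxError::Payment));
}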
- Self::AccountId, + // who paid the fee + T::AccountId, // imbalance resulting from withdrawing the fee <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - let (final_fee, _) = self.withdraw_fee(who, call, info, len)?; + _context: &mut Context, + _: (), + _implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let who = frame_system::ensure_signed(origin.clone()) + .map_err(|_| InvalidTransaction::BadSigner)?; + let final_fee = self.can_withdraw_fee(&who, call, info, len)?; let tip = self.0; - Ok(ValidTransaction { - priority: Self::get_priority(info, len, tip, final_fee), - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: Self::get_priority(info, len, tip, final_fee), + ..Default::default() + }, + (self.0, who, final_fee), + origin, + )) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, + _context: &Context, ) -> Result { - let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; - Ok((self.0, who.clone(), imbalance)) + let (tip, who, fee) = val; + // Mutating call to `withdraw_fee` to actually charge for the transaction. + let (_final_fee, imbalance) = self.withdraw_fee(&who, call, info, fee)?; + Ok((tip, who, imbalance)) } fn post_dispatch( - maybe_pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + (tip, who, imbalance): Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, + _context: &Context, ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, imbalance)) = maybe_pre { - let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, info, post_info, actual_fee, tip, imbalance, - )?; - Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); - } + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, post_info, actual_fee, tip, imbalance, + )?; + Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); Ok(()) } } diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 1ca2e3d7347..eda234ffcae 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -157,4 +157,14 @@ impl Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; type FeeMultiplierUpdate = (); + type WeightInfo = (); +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + crate::tests::ExtBuilder::default() + .base_weight(Weight::from_parts(100, 0)) + .byte_fee(10) + .balance_factor(0) + .build() } diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 886683f2e0b..a13e73b50b0 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ 
b/substrate/frame/transaction-payment/src/payment.rs @@ -20,7 +20,7 @@ use crate::Config; use core::marker::PhantomData; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, + traits::{CheckedSub, DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, transaction_validity::InvalidTransaction, }; @@ -51,6 +51,17 @@ pub trait OnChargeTransaction { tip: Self::Balance, ) -> Result; + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -64,6 +75,12 @@ pub trait OnChargeTransaction { tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, ) -> Result<(), TransactionValidityError>; + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance); + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance; } /// Implements the transaction payment for a pallet implementing the `Currency` @@ -121,6 +138,33 @@ where } } + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + let withdraw_reason = if tip.is_zero() { + WithdrawReasons::TRANSACTION_PAYMENT + } else { + WithdrawReasons::TRANSACTION_PAYMENT | WithdrawReasons::TIP + }; + + let new_balance = + C::free_balance(who).checked_sub(&fee).ok_or(InvalidTransaction::Payment)?; + C::ensure_can_withdraw(who, fee, withdraw_reason, new_balance) + .map(|_| ()) + .map_err(|_| InvalidTransaction::Payment.into()) + } + /// Hand the fee and the tip over to the `[OnUnbalanced]` implementation. /// Since the predicted fee might have been too high, parts of the fee may /// be refunded. 
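As the comment above says, the fee is withdrawn up front for the predicted weight and partially refunded once the actual weight is known. The sketch below illustrates that correction step with a deliberately simplified fee formula (one fee unit per weight unit plus the tip); it is not the pallet's real fee computation.

struct Account {
    free: u128,
}

fn compute_fee(weight: u128, tip: u128) -> u128 {
    weight + tip
}

// `prepare`: withdraw based on the declared (worst-case) weight.
fn withdraw_predicted(acc: &mut Account, predicted_weight: u128, tip: u128) -> u128 {
    let fee = compute_fee(predicted_weight, tip);
    acc.free -= fee;
    fee
}

// `post_dispatch`: refund the difference between predicted and actual fee.
fn correct_and_deposit(acc: &mut Account, withdrawn: u128, actual_weight: u128, tip: u128) {
    // Never refund more than was withdrawn, even if actual > predicted.
    let actual_fee = compute_fee(actual_weight, tip).min(withdrawn);
    acc.free += withdrawn - actual_fee;
}

fn main() {
    let mut acc = Account { free: 1_000 };
    let withdrawn = withdraw_predicted(&mut acc, 100, 5);
    assert_eq!(acc.free, 1_000 - 105);

    // The call only used 50 weight units, so 50 are refunded.
    correct_and_deposit(&mut acc, withdrawn, 50, 5);
    assert_eq!(acc.free, 1_000 - 55);
    println!("final balance: {}", acc.free);
}

Clamping the actual fee to the amount withdrawn mirrors the `actual_weight_higher_than_max_refunds_nothing` test further down: if the call reports more weight than was paid for, nothing extra is charged and nothing is refunded.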
@@ -153,4 +197,14 @@ where } Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = C::deposit_creating(who, amount); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + C::minimum_balance() + } } diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index d3a1721ccb9..7a49f8111e2 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -21,11 +21,14 @@ use crate as pallet_transaction_payment; use codec::Encode; use sp_runtime::{ - testing::TestXt, traits::One, transaction_validity::InvalidTransaction, BuildStorage, + generic::UncheckedExtrinsic, + traits::{DispatchTransaction, One}, + transaction_validity::InvalidTransaction, + BuildStorage, }; use frame_support::{ - assert_noop, assert_ok, + assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, traits::Currency, weights::Weight, @@ -128,44 +131,37 @@ fn default_post_info() -> PostDispatchInfo { PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } +type Ext = ChargeTransactionPayment; + #[test] -fn signed_extension_transaction_payment_work() { +fn transaction_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) + let info = info_from_weight(Weight::from_parts(5, 0)); + Ext::from(0) + .test_run(Some(1).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); assert_eq!(TipUnbalancedAmount::get(), 0); FeeUnbalancedAmount::mutate(|a| *a = 0); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); assert_eq!(TipUnbalancedAmount::get(), 5); @@ -173,43 +169,37 @@ fn signed_extension_transaction_payment_work() { } #[test] -fn signed_extension_transaction_payment_multiplied_refund_works() { +fn transaction_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; >::put(Multiplier::saturating_from_rational(3, 2)); - let pre = ChargeTransactionPayment::::from(5 /* tipped 
*/) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let len = 10; + let origin = Some(2).into(); + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(origin, CALL, &info, len, |_| { + // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); + // 75 (3/2 of the returned 50 units of weight) is refunded assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); }); } #[test] -fn signed_extension_transaction_payment_is_bounded() { +fn transaction_extension_transaction_payment_is_bounded() { ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( - &1, - CALL, - &info_from_weight(Weight::MAX), - 10 - )); + let info = info_from_weight(Weight::MAX); + assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10)); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -220,7 +210,7 @@ fn signed_extension_transaction_payment_is_bounded() { } #[test] -fn signed_extension_allows_free_transactions() { +fn transaction_extension_allows_free_transactions() { ExtBuilder::default() .base_weight(Weight::from_parts(100, 0)) .balance_factor(0) @@ -232,38 +222,28 @@ fn signed_extension_allows_free_transactions() { let len = 100; // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
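The rewritten tests drive the whole extension pipeline through `test_run`/`validate_and_prepare` instead of calling `pre_dispatch` and `post_dispatch` by hand. Below is a dependency-free sketch of what a `test_run`-style helper does; the `Ext` trait is a simplified stand-in, not the real `TransactionExtension`/`DispatchTransaction` machinery.

type Origin = u64;

trait Ext {
    type Val;
    type Pre;
    fn validate(&self, origin: &Origin, len: usize) -> Result<Self::Val, &'static str>;
    fn prepare(self, val: Self::Val) -> Result<Self::Pre, &'static str>;
    fn post_dispatch(pre: Self::Pre, actual_weight: u64) -> Result<(), &'static str>;
}

// Runs the full pipeline around a call, mirroring how the tests use `test_run`:
// validate, prepare, execute the call closure, then feed the pre-dispatch state
// into post-dispatch.
fn test_run<E: Ext>(
    ext: E,
    origin: Origin,
    len: usize,
    call: impl FnOnce(&Origin) -> Result<u64, &'static str>, // returns the actual weight
) -> Result<(), &'static str> {
    let val = ext.validate(&origin, len)?;
    let pre = ext.prepare(val)?;
    let actual_weight = call(&origin)?;
    E::post_dispatch(pre, actual_weight)
}

struct CountingExt;

impl Ext for CountingExt {
    type Val = ();
    type Pre = ();
    fn validate(&self, _origin: &Origin, _len: usize) -> Result<(), &'static str> {
        Ok(())
    }
    fn prepare(self, _val: ()) -> Result<(), &'static str> {
        Ok(())
    }
    fn post_dispatch(_pre: (), actual_weight: u64) -> Result<(), &'static str> {
        println!("post_dispatch saw actual weight {actual_weight}");
        Ok(())
    }
}

fn main() {
    // The closure plays the role of the dispatched call and reports its actual weight.
    let result = test_run(CountingExt, 1, 10, |_origin| Ok(50));
    assert!(result.is_ok());
}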
- let operational_transaction = DispatchInfo { + let op_tx = DispatchInfo { weight: Weight::from_parts(0, 0), class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_ok!(ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &operational_transaction, - len - )); + assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len)); // like a InsecureFreeNormal - let free_transaction = DispatchInfo { + let free_tx = DispatchInfo { weight: Weight::from_parts(0, 0), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - assert_noop!( - ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &free_transaction, - len - ), + assert_eq!( + Ext::from(0).validate_only(Some(1).into(), CALL, &free_tx, len).unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); } #[test] -fn signed_ext_length_fee_is_also_updated_per_congestion() { +fn transaction_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() .base_weight(Weight::from_parts(5, 0)) .balance_factor(10) @@ -272,16 +252,15 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { // all fees should be x1.5 >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len)); + let info = &info_from_weight(Weight::from_parts(3, 0)); + assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, info, len)); assert_eq!( Balances::free_balance(1), 100 // original - - 10 // tip - - 5 // base - - 10 // len - - (3 * 3 / 2) // adjusted weight + - 10 // tip + - 5 // base + - 10 // len + - (3 * 3 / 2) // adjusted weight ); }) } @@ -291,62 +270,62 @@ fn query_info_and_fee_details_works() { let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let origin = 111111; let extra = (); - let xt = TestXt::new(call.clone(), Some((origin, extra))); + let xt = UncheckedExtrinsic::new_signed(call.clone(), origin, (), extra); let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - let unsigned_xt = TestXt::<_, ()>::new(call, None); + let unsigned_xt = UncheckedExtrinsic::::new_bare(call); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 - }), - tip: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + 
>::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] @@ -357,39 +336,39 @@ fn query_call_info_and_fee_details_works() { let len = encoded_call.len() as u32; ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ - }), - tip: 0, - }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); } #[test] @@ -526,27 +505,23 @@ fn refund_does_not_recreate_account() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |origin| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer_allow_death( + origin, + 3, + Balances::free_balance(2) + )); + 
assert_eq!(Balances::free_balance(2), 0); + + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert_ok!(Balances::transfer_allow_death( - Some(2).into(), - 3, - Balances::free_balance(2) - )); - assert_eq!(Balances::free_balance(2), 0); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 0); // Transfer Event System::assert_has_event(RuntimeEvent::Balances(pallet_balances::Event::Transfer { @@ -568,20 +543,15 @@ fn actual_weight_higher_than_max_refunds_nothing() { .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(101, 0))) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(101, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); }); } @@ -594,25 +564,20 @@ fn zero_transfer_on_free_transaction() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { + let info = DispatchInfo { weight: Weight::from_parts(100, 0), pays_fee: Pays::No, class: DispatchClass::Normal, }; let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) + Ext::from(0) + .test_run(Some(user).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::total_balance(&user), 0); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::total_balance(&user), 0); - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &dispatch_info, - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event System::assert_has_event(RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { @@ -639,19 +604,11 @@ fn refund_consistent_with_actual_weight() { >::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + Ext::from(tip) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); @@ -673,18 +630,13 @@ fn should_alter_operational_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; 
assert_eq!(priority, 60); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 110); }); @@ -694,16 +646,13 @@ fn should_alter_operational_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5810); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 6110); }); } @@ -719,11 +668,8 @@ fn no_tip_has_some_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 10); }); @@ -733,10 +679,8 @@ fn no_tip_has_some_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5510); }); } @@ -744,8 +688,8 @@ fn no_tip_has_some_priority() { #[test] fn higher_tip_have_higher_priority() { let get_priorities = |tip: u64| { - let mut priority1 = 0; - let mut priority2 = 0; + let mut pri1 = 0; + let mut pri2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { @@ -753,10 +697,8 @@ fn higher_tip_have_higher_priority() { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - priority1 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri1 = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { @@ -765,13 +707,11 @@ fn higher_tip_have_higher_priority() { class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - priority2 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri2 = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; }); - (priority1, priority2) + (pri1, pri2) }; let mut prev_priorities = get_priorities(0); @@ -799,19 +739,11 @@ fn post_info_can_change_pays_fee() { >::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let post_info = ChargeTransactionPayment::::from(tip) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs 
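The priority assertions above pin down a qualitative shape: a larger tip buys a higher priority, and operational transactions receive a large additional boost. The snippet below is purely illustrative of that property; its formula is invented for the sketch and is not the pallet's actual `get_priority` computation.

enum DispatchClass {
    Normal,
    Operational,
}

fn illustrative_priority(tip: u64, class: DispatchClass) -> u64 {
    let base = match class {
        DispatchClass::Normal => 10,
        // Operational transactions are given a large additive bump.
        DispatchClass::Operational => 5_000,
    };
    base.saturating_add(tip.saturating_mul(10))
}

fn main() {
    let normal_small = illustrative_priority(5, DispatchClass::Normal);
    let normal_large = illustrative_priority(10, DispatchClass::Normal);
    let operational = illustrative_priority(5, DispatchClass::Operational);

    // Doubling the tip strictly increases priority.
    assert!(normal_large > normal_small);
    // Operational beats normal at the same tip.
    assert!(operational > normal_small);
    println!("{normal_small} {normal_large} {operational}");
}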
index 25cecc58a63..bfb3012fef0 100644 --- a/substrate/frame/transaction-payment/src/types.rs +++ b/substrate/frame/transaction-payment/src/types.rs @@ -112,7 +112,7 @@ pub struct RuntimeDispatchInfo /// The inclusion fee of this dispatch. /// /// This does not include a tip or anything else that - /// depends on the signature (i.e. depends on a `SignedExtension`). + /// depends on the signature (i.e. depends on a `TransactionExtension`). #[cfg_attr(feature = "std", serde(with = "serde_balance"))] pub partial_fee: Balance, } diff --git a/substrate/frame/transaction-payment/src/weights.rs b/substrate/frame/transaction-payment/src/weights.rs new file mode 100644 index 00000000000..bcffb2eb331 --- /dev/null +++ b/substrate/frame/transaction-payment/src/weights.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_transaction_payment`. +pub trait WeightInfo { + fn charge_transaction_payment() -> Weight; +} + +/// Weights for `pallet_transaction_payment` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. + Weight::from_parts(41_647_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. + Weight::from_parts(41_647_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } +} diff --git a/substrate/frame/transaction-storage/src/weights.rs b/substrate/frame/transaction-storage/src/weights.rs index 519317177c4..bfbed62c5a3 100644 --- a/substrate/frame/transaction-storage/src/weights.rs +++ b/substrate/frame/transaction-storage/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_transaction_storage +//! Autogenerated weights for `pallet_transaction_storage` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/transaction-storage/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/transaction-storage/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,125 +49,133 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_transaction_storage. +/// Weight functions needed for `pallet_transaction_storage`. pub trait WeightInfo { fn store(l: u32, ) -> Weight; fn renew() -> Weight; fn check_proof_max() -> Weight; } -/// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. +/// Weights for `pallet_transaction_storage` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `242` // Estimated: `38351` - // Minimum execution time: 34_844_000 picoseconds. - Weight::from_parts(35_489_000, 38351) + // Minimum execution time: 57_912_000 picoseconds. 
+ Weight::from_parts(58_642_000, 38351) // Standard Error: 11 - .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(6_778, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `430` // Estimated: `40351` - // Minimum execution time: 48_244_000 picoseconds. - Weight::from_parts(50_939_000, 40351) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 72_571_000 picoseconds. 
+ Weight::from_parts(74_832_000, 40351) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage ProofChecked (r:1 w:1) - /// Proof: TransactionStorage ProofChecked (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: TransactionStorage StoragePeriod (r:1 w:0) - /// Proof: TransactionStorage StoragePeriod (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: TransactionStorage ChunkCount (r:1 w:0) - /// Proof: TransactionStorage ChunkCount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ProofChecked` (r:1 w:1) + /// Proof: `TransactionStorage::ProofChecked` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::StoragePeriod` (r:1 w:0) + /// Proof: `TransactionStorage::StoragePeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ChunkCount` (r:1 w:0) + /// Proof: `TransactionStorage::ChunkCount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37145` + // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 80_913_000 picoseconds. - Weight::from_parts(84_812_000, 40351) + // Minimum execution time: 65_985_000 picoseconds. + Weight::from_parts(72_960_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. 
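The `store(l)` weight above also scales linearly with the benchmarked component `l`, adding a per-byte term on top of the base weight and the database operations. A self-contained sketch of that linear composition, modelling only the ref_time half and using illustrative database costs (not the real frame_support types):

// total = base + per_byte * l + db reads/writes. The base and per-byte slope are
// quoted from the generated `store(l)`; the DB costs are illustrative only.
fn store_weight(l: u64, db_read: u64, db_write: u64) -> u64 {
    let base = 58_642_000u64; // base ref_time from the generated function, in picoseconds
    let per_byte = 6_778u64;  // fitted slope for component `l` (bytes stored)
    base.saturating_add(per_byte.saturating_mul(l))
        .saturating_add(db_read.saturating_mul(4))
        .saturating_add(db_write.saturating_mul(2))
}

fn main() {
    // Illustrative per-operation DB costs only.
    let (read, write) = (25_000_000, 100_000_000);
    let small = store_weight(1, read, write);
    let large = store_weight(8_388_608, read, write);
    assert!(large > small);
    println!("store(1) = {small} ps, store(8388608) = {large} ps");
}

The `Standard Error: 11` comment in the generated code records the uncertainty of that fitted per-byte slope from the benchmark's linear regression.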
impl WeightInfo for () { - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `242` // Estimated: `38351` - // Minimum execution time: 34_844_000 picoseconds. - Weight::from_parts(35_489_000, 38351) + // Minimum execution time: 57_912_000 picoseconds. + Weight::from_parts(58_642_000, 38351) // Standard Error: 11 - .saturating_add(Weight::from_parts(6_912, 0).saturating_mul(l.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(Weight::from_parts(6_778, 0).saturating_mul(l.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) - /// Storage: TransactionStorage ByteFee (r:1 w:0) - /// Proof: TransactionStorage ByteFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage EntryFee (r:1 w:0) - /// Proof: TransactionStorage EntryFee (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: TransactionStorage BlockTransactions (r:1 w:1) - /// Proof: TransactionStorage BlockTransactions (max_values: Some(1), max_size: Some(36866), added: 37361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ByteFee` (r:1 w:0) + /// Proof: `TransactionStorage::ByteFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) + /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) + /// Proof: `TransactionStorage::BlockTransactions` 
(`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `430` // Estimated: `40351` - // Minimum execution time: 48_244_000 picoseconds. - Weight::from_parts(50_939_000, 40351) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 72_571_000 picoseconds. + Weight::from_parts(74_832_000, 40351) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: TransactionStorage ProofChecked (r:1 w:1) - /// Proof: TransactionStorage ProofChecked (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: TransactionStorage StoragePeriod (r:1 w:0) - /// Proof: TransactionStorage StoragePeriod (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: TransactionStorage ChunkCount (r:1 w:0) - /// Proof: TransactionStorage ChunkCount (max_values: None, max_size: Some(24), added: 2499, mode: MaxEncodedLen) - /// Storage: System ParentHash (r:1 w:0) - /// Proof: System ParentHash (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TransactionStorage Transactions (r:1 w:0) - /// Proof: TransactionStorage Transactions (max_values: None, max_size: Some(36886), added: 39361, mode: MaxEncodedLen) + /// Storage: `TransactionStorage::ProofChecked` (r:1 w:1) + /// Proof: `TransactionStorage::ProofChecked` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::StoragePeriod` (r:1 w:0) + /// Proof: `TransactionStorage::StoragePeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::ChunkCount` (r:1 w:0) + /// Proof: `TransactionStorage::ChunkCount` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `TransactionStorage::Transactions` (r:1 w:0) + /// Proof: `TransactionStorage::Transactions` (`max_values`: None, `max_size`: Some(36886), added: 39361, mode: `MaxEncodedLen`) fn check_proof_max() -> Weight { // Proof Size summary in bytes: - // Measured: `37145` + // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 80_913_000 picoseconds. - Weight::from_parts(84_812_000, 40351) + // Minimum execution time: 65_985_000 picoseconds. + Weight::from_parts(72_960_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 030e18980eb..8ef4f89fd65 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -15,27 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_treasury +//! Autogenerated weights for `pallet_treasury` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-07, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/debug/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev -// --steps=20 -// --repeat=2 -// --pallet=pallet-treasury +// --steps=50 +// --repeat=20 +// --pallet=pallet_treasury +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/treasury/src/._weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/treasury/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -45,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_treasury. +/// Weight functions needed for `pallet_treasury`. pub trait WeightInfo { fn spend_local() -> Weight; fn propose_spend() -> Weight; @@ -59,304 +63,304 @@ pub trait WeightInfo { fn void_spend() -> Weight; } -/// Weights for pallet_treasury using the Substrate node and recommended hardware. +/// Weights for `pallet_treasury` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(190_000_000, 1887) + // Minimum execution time: 11_686_000 picoseconds. 
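
A minimal illustrative sketch of how the regenerated entries are put together, assuming only the `frame_support` imports this file already declares; the helper name `spend_local_like` is hypothetical, and the constants are the new `spend_local` fallback values. Each entry pairs a benchmarked base of `Weight::from_parts(ref_time_in_picoseconds, proof_size_in_bytes)` with database-access costs charged through `T::DbWeight` (or `RocksDbWeight` in the `()` fallback further down):

    use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

    /// Hypothetical helper mirroring the regenerated `spend_local` fallback entry.
    fn spend_local_like() -> Weight {
        // 12_138_000 ps of benchmarked execution time and an 1887-byte proof-size bound,
        // plus the reference cost of the 2 storage reads and 3 storage writes listed above.
        Weight::from_parts(12_138_000, 1887)
            .saturating_add(RocksDbWeight::get().reads(2_u64))
            .saturating_add(RocksDbWeight::get().writes(3_u64))
    }
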
+ Weight::from_parts(12_138_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: // Measured: `177` // Estimated: `1489` - // Minimum execution time: 349_000_000 picoseconds. - Weight::from_parts(398_000_000, 1489) + // Minimum execution time: 23_773_000 picoseconds. + Weight::from_parts(24_801_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3593` - // Minimum execution time: 367_000_000 picoseconds. - Weight::from_parts(388_000_000, 3593) + // Minimum execution time: 24_533_000 picoseconds. + Weight::from_parts(25_731_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `483 + p * (9 Β±0)` + // Measured: `504 + p * (8 Β±0)` // Estimated: `3573` - // Minimum execution time: 111_000_000 picoseconds. - Weight::from_parts(108_813_243, 3573) - // Standard Error: 147_887 - .saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into())) + // Minimum execution time: 8_245_000 picoseconds. 
+ Weight::from_parts(11_300_222, 3573) + // Standard Error: 1_080 + .saturating_add(Weight::from_parts(71_375, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 71_000_000 picoseconds. - Weight::from_parts(78_000_000, 1887) + // Minimum execution time: 6_231_000 picoseconds. + Weight::from_parts(6_517_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:198 w:198) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `427 + p * (251 Β±0)` + // Measured: `451 + p * (251 Β±0)` // Estimated: `1887 + p * (5206 Β±0)` - // Minimum execution time: 614_000_000 picoseconds. - Weight::from_parts(498_501_558, 1887) - // Standard Error: 1_070_260 - .saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into())) + // Minimum execution time: 30_729_000 picoseconds. 
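
To make the component scaling of `approve_proposal(p)` above concrete (a worked reading of the regenerated values, not an extra benchmark result): at the benchmarked upper bound `p = 99`, the execution-time part evaluates to 11_300_222 + 99 * 71_375 = 18_366_347 picoseconds, roughly 18.4 µs, and the 2 reads and 1 write are then charged on top through `T::DbWeight` (or `RocksDbWeight` in the `()` fallback).
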
+ Weight::from_parts(37_861_389, 1887) + // Standard Error: 18_741 + .saturating_add(Weight::from_parts(31_377_118, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3501` - // Minimum execution time: 214_000_000 picoseconds. - Weight::from_parts(216_000_000, 3501) + // Minimum execution time: 13_700_000 picoseconds. + Weight::from_parts(14_519_000, 3501) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `705` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 760_000_000 picoseconds. - Weight::from_parts(822_000_000, 6208) + // Minimum execution time: 54_888_000 picoseconds. 
+ Weight::from_parts(55_966_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 153_000_000 picoseconds. - Weight::from_parts(160_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 11_802_000 picoseconds. + Weight::from_parts(12_421_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(181_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(11_052_000, 3538) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(190_000_000, 1887) + // Minimum execution time: 11_686_000 picoseconds. 
+ Weight::from_parts(12_138_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn propose_spend() -> Weight { // Proof Size summary in bytes: // Measured: `177` // Estimated: `1489` - // Minimum execution time: 349_000_000 picoseconds. - Weight::from_parts(398_000_000, 1489) + // Minimum execution time: 23_773_000 picoseconds. + Weight::from_parts(24_801_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn reject_proposal() -> Weight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3593` - // Minimum execution time: 367_000_000 picoseconds. - Weight::from_parts(388_000_000, 3593) + // Minimum execution time: 24_533_000 picoseconds. + Weight::from_parts(25_731_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Proposals (r:1 w:0) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `483 + p * (9 Β±0)` + // Measured: `504 + p * (8 Β±0)` // Estimated: `3573` - // Minimum execution time: 111_000_000 picoseconds. - Weight::from_parts(108_813_243, 3573) - // Standard Error: 147_887 - .saturating_add(Weight::from_parts(683_216, 0).saturating_mul(p.into())) + // Minimum execution time: 8_245_000 picoseconds. 
+ Weight::from_parts(11_300_222, 3573) + // Standard Error: 1_080 + .saturating_add(Weight::from_parts(71_375, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 71_000_000 picoseconds. - Weight::from_parts(78_000_000, 1887) + // Minimum execution time: 6_231_000 picoseconds. + Weight::from_parts(6_517_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:198 w:198) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:198 w:198) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `427 + p * (251 Β±0)` + // Measured: `451 + p * (251 Β±0)` // Estimated: `1887 + p * (5206 Β±0)` - // Minimum execution time: 614_000_000 picoseconds. - Weight::from_parts(498_501_558, 1887) - // Standard Error: 1_070_260 - .saturating_add(Weight::from_parts(599_011_690, 0).saturating_mul(p.into())) + // Minimum execution time: 30_729_000 picoseconds. 
+ Weight::from_parts(37_861_389, 1887) + // Standard Error: 18_741 + .saturating_add(Weight::from_parts(31_377_118, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(36), added: 2511, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3501` - // Minimum execution time: 214_000_000 picoseconds. - Weight::from_parts(216_000_000, 3501) + // Minimum execution time: 13_700_000 picoseconds. + Weight::from_parts(14_519_000, 3501) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: Assets Asset (r:1 w:1) - /// Proof: Assets Asset (max_values: None, max_size: Some(210), added: 2685, mode: MaxEncodedLen) - /// Storage: Assets Account (r:2 w:2) - /// Proof: Assets Account (max_values: None, max_size: Some(134), added: 2609, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `705` + // Measured: `709` // Estimated: `6208` - // Minimum execution time: 760_000_000 picoseconds. - Weight::from_parts(822_000_000, 6208) + // Minimum execution time: 54_888_000 picoseconds. 
+ Weight::from_parts(55_966_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 153_000_000 picoseconds. - Weight::from_parts(160_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 11_802_000 picoseconds. + Weight::from_parts(12_421_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `194` - // Estimated: `3534` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(181_000_000, 3534) + // Measured: `198` + // Estimated: `3538` + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(11_052_000, 3538) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/tx-pause/src/weights.rs b/substrate/frame/tx-pause/src/weights.rs index b733e64b159..14fead12a8e 100644 --- a/substrate/frame/tx-pause/src/weights.rs +++ b/substrate/frame/tx-pause/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_tx_pause` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_tx_pause +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_tx_pause -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/tx-pause/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/tx-pause/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 15_096_000 picoseconds. 
- Weight::from_parts(15_437_000, 3997) + // Minimum execution time: 12_014_000 picoseconds. + Weight::from_parts(12_980_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -73,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 21_546_000 picoseconds. - Weight::from_parts(22_178_000, 3997) + // Minimum execution time: 17_955_000 picoseconds. + Weight::from_parts(18_348_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -88,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 15_096_000 picoseconds. - Weight::from_parts(15_437_000, 3997) + // Minimum execution time: 12_014_000 picoseconds. + Weight::from_parts(12_980_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -99,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 21_546_000 picoseconds. - Weight::from_parts(22_178_000, 3997) + // Minimum execution time: 17_955_000 picoseconds. + Weight::from_parts(18_348_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/uniques/src/weights.rs b/substrate/frame/uniques/src/weights.rs index eb80ee550a1..6c7b60b82e5 100644 --- a/substrate/frame/uniques/src/weights.rs +++ b/substrate/frame/uniques/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_uniques` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_uniques +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_uniques -// --chain=dev -// --header=./HEADER-APACHE2 -// --output=./frame/uniques/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/uniques/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -88,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_393_000 picoseconds. - Weight::from_parts(32_933_000, 3643) + // Minimum execution time: 27_960_000 picoseconds. 
+ Weight::from_parts(28_781_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 14_827_000 picoseconds. - Weight::from_parts(15_273_000, 3643) + // Minimum execution time: 11_970_000 picoseconds. + Weight::from_parts(12_512_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -111,13 +113,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Uniques::Asset` (r:1001 w:1000) /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:1) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) /// Storage: `Uniques::Account` (r:0 w:1000) /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) @@ -128,15 +130,15 @@ impl WeightInfo for SubstrateWeight { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `418 + a * (107 Β±0) + m * (56 Β±0) + n * (76 Β±0)` - // Estimated: `3643 + a * (2839 Β±0) + m * (2583 Β±0) + n * (2597 Β±0)` - // Minimum execution time: 3_281_673_000 picoseconds. - Weight::from_parts(3_443_387_000, 3643) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) + // Estimated: `3643 + a * (2647 Β±0) + m * (2662 Β±0) + n * (2597 Β±0)` + // Minimum execution time: 2_979_858_000 picoseconds. 
+ Weight::from_parts(3_007_268_000, 3643) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(6_943_612, 0).saturating_mul(n.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(398_114, 0).saturating_mul(m.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(369_941, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -145,8 +147,8 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: `Uniques::Asset` (r:1 w:1) @@ -161,8 +163,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_122_000 picoseconds. - Weight::from_parts(38_924_000, 3643) + // Minimum execution time: 32_733_000 picoseconds. + Weight::from_parts(33_487_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -178,8 +180,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 38_835_000 picoseconds. - Weight::from_parts(39_754_000, 3643) + // Minimum execution time: 34_202_000 picoseconds. + Weight::from_parts(35_146_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -195,8 +197,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 27_032_000 picoseconds. - Weight::from_parts(27_793_000, 3643) + // Minimum execution time: 24_285_000 picoseconds. + Weight::from_parts(25_300_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -209,10 +211,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `805 + i * (76 Β±0)` // Estimated: `3643 + i * (2597 Β±0)` - // Minimum execution time: 14_737_000 picoseconds. - Weight::from_parts(15_070_000, 3643) - // Standard Error: 22_500 - .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) + // Minimum execution time: 11_641_000 picoseconds. + Weight::from_parts(12_261_000, 3643) + // Standard Error: 18_897 + .saturating_add(Weight::from_parts(15_263_570, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -227,8 +229,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_664_000 picoseconds. - Weight::from_parts(19_455_000, 3643) + // Minimum execution time: 16_122_000 picoseconds. 
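
A worked reading of the multi-component `destroy(n, m, a)` entry above, assuming the benchmarked maxima n = m = a = 1000: the execution-time part comes to 3_007_268_000 + 1000 * (6_943_612 + 398_114 + 369_941) = 10_718_935_000 picoseconds (about 10.7 ms); the proof-size estimate grows from the 3643-byte base by 2647 bytes per attribute, 2662 per metadata entry and 2597 per item (roughly 7.5 MiB in total at the maxima); and the per-item storage reads and writes are charged separately through the DbWeight constants, which for the RocksDB reference values end up dominating the total at this scale.
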
+ Weight::from_parts(16_627_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -240,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_247_000 picoseconds. - Weight::from_parts(18_763_000, 3643) + // Minimum execution time: 15_824_000 picoseconds. + Weight::from_parts(16_522_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_219_000 picoseconds. - Weight::from_parts(13_923_000, 3643) + // Minimum execution time: 10_802_000 picoseconds. + Weight::from_parts(11_206_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,8 +264,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_376_000 picoseconds. - Weight::from_parts(13_904_000, 3643) + // Minimum execution time: 10_805_000 picoseconds. + Weight::from_parts(11_340_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -271,16 +273,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:2) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `423` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 22_353_000 picoseconds. - Weight::from_parts(23_222_000, 3643) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 24_213_000 picoseconds. + Weight::from_parts(25_547_000, 3643) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) @@ -288,8 +292,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 14_072_000 picoseconds. - Weight::from_parts(14_619_000, 3643) + // Minimum execution time: 11_256_000 picoseconds. + Weight::from_parts(11_585_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -301,90 +305,90 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_081_000 picoseconds. - Weight::from_parts(17_698_000, 3643) + // Minimum execution time: 14_616_000 picoseconds. 
+ Weight::from_parts(15_064_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3829` - // Minimum execution time: 41_501_000 picoseconds. - Weight::from_parts(43_101_000, 3829) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 35_640_000 picoseconds. + Weight::from_parts(37_007_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `3829` - // Minimum execution time: 39_722_000 picoseconds. - Weight::from_parts(40_390_000, 3829) + // Measured: `823` + // Estimated: `3652` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_431_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` - // Estimated: `3643` - // Minimum execution time: 30_726_000 picoseconds. - Weight::from_parts(31_557_000, 3643) + // Estimated: `3652` + // Minimum execution time: 26_187_000 picoseconds. 
+ Weight::from_parts(27_837_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3643` - // Minimum execution time: 31_303_000 picoseconds. - Weight::from_parts(32_389_000, 3643) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 26_640_000 picoseconds. + Weight::from_parts(27_688_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 32_155_000 picoseconds. - Weight::from_parts(32_885_000, 3643) + // Minimum execution time: 26_781_000 picoseconds. + Weight::from_parts(27_628_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:0) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 30_044_000 picoseconds. - Weight::from_parts(31_405_000, 3643) + // Minimum execution time: 26_295_000 picoseconds. + Weight::from_parts(26_903_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -396,8 +400,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_904_000 picoseconds. - Weight::from_parts(19_687_000, 3643) + // Minimum execution time: 16_678_000 picoseconds. + Weight::from_parts(17_548_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -409,8 +413,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 19_144_000 picoseconds. - Weight::from_parts(19_706_000, 3643) + // Minimum execution time: 16_408_000 picoseconds. 
+ Weight::from_parts(16_957_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -420,8 +424,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 15_339_000 picoseconds. - Weight::from_parts(15_918_000, 3517) + // Minimum execution time: 12_568_000 picoseconds. + Weight::from_parts(12_960_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -433,8 +437,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_387_000 picoseconds. - Weight::from_parts(15_726_000, 3643) + // Minimum execution time: 13_110_000 picoseconds. + Weight::from_parts(13_620_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -446,8 +450,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 15_873_000 picoseconds. - Weight::from_parts(16_860_000, 3587) + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_211_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -463,8 +467,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 37_245_000 picoseconds. - Weight::from_parts(38_383_000, 3643) + // Minimum execution time: 32_660_000 picoseconds. + Weight::from_parts(33_734_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -480,8 +484,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `249` // Estimated: `3643` - // Minimum execution time: 31_393_000 picoseconds. - Weight::from_parts(32_933_000, 3643) + // Minimum execution time: 27_960_000 picoseconds. + Weight::from_parts(28_781_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -493,8 +497,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3643` - // Minimum execution time: 14_827_000 picoseconds. - Weight::from_parts(15_273_000, 3643) + // Minimum execution time: 11_970_000 picoseconds. 
+ Weight::from_parts(12_512_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -503,13 +507,13 @@ impl WeightInfo for () { /// Storage: `Uniques::Asset` (r:1001 w:1000) /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:1) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) /// Storage: `Uniques::Account` (r:0 w:1000) /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) @@ -520,15 +524,15 @@ impl WeightInfo for () { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `418 + a * (107 Β±0) + m * (56 Β±0) + n * (76 Β±0)` - // Estimated: `3643 + a * (2839 Β±0) + m * (2583 Β±0) + n * (2597 Β±0)` - // Minimum execution time: 3_281_673_000 picoseconds. - Weight::from_parts(3_443_387_000, 3643) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(7_914_842, 0).saturating_mul(n.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(519_960, 0).saturating_mul(m.into())) - // Standard Error: 41_937 - .saturating_add(Weight::from_parts(462_690, 0).saturating_mul(a.into())) + // Estimated: `3643 + a * (2647 Β±0) + m * (2662 Β±0) + n * (2597 Β±0)` + // Minimum execution time: 2_979_858_000 picoseconds. 
+ Weight::from_parts(3_007_268_000, 3643) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(6_943_612, 0).saturating_mul(n.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(398_114, 0).saturating_mul(m.into())) + // Standard Error: 29_899 + .saturating_add(Weight::from_parts(369_941, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -537,8 +541,8 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2839).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) } /// Storage: `Uniques::Asset` (r:1 w:1) @@ -553,8 +557,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 38_122_000 picoseconds. - Weight::from_parts(38_924_000, 3643) + // Minimum execution time: 32_733_000 picoseconds. + Weight::from_parts(33_487_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -570,8 +574,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 38_835_000 picoseconds. - Weight::from_parts(39_754_000, 3643) + // Minimum execution time: 34_202_000 picoseconds. + Weight::from_parts(35_146_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -587,8 +591,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 27_032_000 picoseconds. - Weight::from_parts(27_793_000, 3643) + // Minimum execution time: 24_285_000 picoseconds. + Weight::from_parts(25_300_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -601,10 +605,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `805 + i * (76 Β±0)` // Estimated: `3643 + i * (2597 Β±0)` - // Minimum execution time: 14_737_000 picoseconds. - Weight::from_parts(15_070_000, 3643) - // Standard Error: 22_500 - .saturating_add(Weight::from_parts(18_855_468, 0).saturating_mul(i.into())) + // Minimum execution time: 11_641_000 picoseconds. + Weight::from_parts(12_261_000, 3643) + // Standard Error: 18_897 + .saturating_add(Weight::from_parts(15_263_570, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -619,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_664_000 picoseconds. - Weight::from_parts(19_455_000, 3643) + // Minimum execution time: 16_122_000 picoseconds. 
+ Weight::from_parts(16_627_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -632,8 +636,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_247_000 picoseconds. - Weight::from_parts(18_763_000, 3643) + // Minimum execution time: 15_824_000 picoseconds. + Weight::from_parts(16_522_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -643,8 +647,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_219_000 picoseconds. - Weight::from_parts(13_923_000, 3643) + // Minimum execution time: 10_802_000 picoseconds. + Weight::from_parts(11_206_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -654,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 13_376_000 picoseconds. - Weight::from_parts(13_904_000, 3643) + // Minimum execution time: 10_805_000 picoseconds. + Weight::from_parts(11_340_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -663,16 +667,18 @@ impl WeightInfo for () { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassAccount` (r:0 w:2) /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `423` + // Measured: `597` // Estimated: `3643` - // Minimum execution time: 22_353_000 picoseconds. - Weight::from_parts(23_222_000, 3643) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 24_213_000 picoseconds. + Weight::from_parts(25_547_000, 3643) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) @@ -680,8 +686,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 14_072_000 picoseconds. - Weight::from_parts(14_619_000, 3643) + // Minimum execution time: 11_256_000 picoseconds. + Weight::from_parts(11_585_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -693,90 +699,90 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 17_081_000 picoseconds. - Weight::from_parts(17_698_000, 3643) + // Minimum execution time: 14_616_000 picoseconds. 
+ Weight::from_parts(15_064_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3829` - // Minimum execution time: 41_501_000 picoseconds. - Weight::from_parts(43_101_000, 3829) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 35_640_000 picoseconds. + Weight::from_parts(37_007_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(364), added: 2839, mode: `MaxEncodedLen`) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `3829` - // Minimum execution time: 39_722_000 picoseconds. - Weight::from_parts(40_390_000, 3829) + // Measured: `823` + // Estimated: `3652` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_431_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `415` - // Estimated: `3643` - // Minimum execution time: 30_726_000 picoseconds. - Weight::from_parts(31_557_000, 3643) + // Estimated: `3652` + // Minimum execution time: 26_187_000 picoseconds. 
+ Weight::from_parts(27_837_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `547` - // Estimated: `3643` - // Minimum execution time: 31_303_000 picoseconds. - Weight::from_parts(32_389_000, 3643) + // Measured: `626` + // Estimated: `3652` + // Minimum execution time: 26_640_000 picoseconds. + Weight::from_parts(27_688_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:1) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 32_155_000 picoseconds. - Weight::from_parts(32_885_000, 3643) + // Minimum execution time: 26_781_000 picoseconds. + Weight::from_parts(27_628_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Uniques::Class` (r:1 w:0) /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `461` + // Measured: `540` // Estimated: `3643` - // Minimum execution time: 30_044_000 picoseconds. - Weight::from_parts(31_405_000, 3643) + // Minimum execution time: 26_295_000 picoseconds. + Weight::from_parts(26_903_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -788,8 +794,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `495` // Estimated: `3643` - // Minimum execution time: 18_904_000 picoseconds. - Weight::from_parts(19_687_000, 3643) + // Minimum execution time: 16_678_000 picoseconds. + Weight::from_parts(17_548_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -801,8 +807,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `528` // Estimated: `3643` - // Minimum execution time: 19_144_000 picoseconds. - Weight::from_parts(19_706_000, 3643) + // Minimum execution time: 16_408_000 picoseconds. 
+ Weight::from_parts(16_957_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -812,8 +818,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3517` - // Minimum execution time: 15_339_000 picoseconds. - Weight::from_parts(15_918_000, 3517) + // Minimum execution time: 12_568_000 picoseconds. + Weight::from_parts(12_960_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -825,8 +831,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `349` // Estimated: `3643` - // Minimum execution time: 15_387_000 picoseconds. - Weight::from_parts(15_726_000, 3643) + // Minimum execution time: 13_110_000 picoseconds. + Weight::from_parts(13_620_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -838,8 +844,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `3587` - // Minimum execution time: 15_873_000 picoseconds. - Weight::from_parts(16_860_000, 3587) + // Minimum execution time: 13_568_000 picoseconds. + Weight::from_parts(14_211_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -855,8 +861,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `607` // Estimated: `3643` - // Minimum execution time: 37_245_000 picoseconds. - Weight::from_parts(38_383_000, 3643) + // Minimum execution time: 32_660_000 picoseconds. + Weight::from_parts(33_734_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs index 1a3ea6c1f7f..60b1c48f086 100644 --- a/substrate/frame/utility/src/weights.rs +++ b/substrate/frame/utility/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_utility +//! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/utility/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/utility/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_utility. +/// Weight functions needed for `pallet_utility`. pub trait WeightInfo { fn batch(c: u32, ) -> Weight; fn as_derivative() -> Weight; @@ -59,99 +58,139 @@ pub trait WeightInfo { fn force_batch(c: u32, ) -> Weight; } -/// Weights for pallet_utility using the Substrate node and recommended hardware. +/// Weights for `pallet_utility` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(16_943_157, 0) - // Standard Error: 1_904 - .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_143_000 picoseconds. + Weight::from_parts(11_552_299, 3997) + // Standard Error: 2_325 + .saturating_add(Weight::from_parts(4_708_951, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_149_000 picoseconds. - Weight::from_parts(5_268_000, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_103_000 picoseconds. + Weight::from_parts(9_549_000, 3997) + .saturating_add(T::DbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. 
- Weight::from_parts(16_448_433, 0) - // Standard Error: 1_834 - .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_147_000 picoseconds. + Weight::from_parts(15_698_086, 3997) + // Standard Error: 3_066 + .saturating_add(Weight::from_parts(4_809_412, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_353_000, 0) + // Minimum execution time: 6_683_000 picoseconds. + Weight::from_parts(7_047_000, 0) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(17_748_474, 0) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 4_982_000 picoseconds. + Weight::from_parts(14_474_780, 3997) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(4_632_643, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_763_000 picoseconds. - Weight::from_parts(16_943_157, 0) - // Standard Error: 1_904 - .saturating_add(Weight::from_parts(4_653_855, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_143_000 picoseconds. + Weight::from_parts(11_552_299, 3997) + // Standard Error: 2_325 + .saturating_add(Weight::from_parts(4_708_951, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_149_000 picoseconds. - Weight::from_parts(5_268_000, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 9_103_000 picoseconds. 
+ Weight::from_parts(9_549_000, 3997) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_976_000 picoseconds. - Weight::from_parts(16_448_433, 0) - // Standard Error: 1_834 - .saturating_add(Weight::from_parts(4_796_983, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 5_147_000 picoseconds. + Weight::from_parts(15_698_086, 3997) + // Standard Error: 3_066 + .saturating_add(Weight::from_parts(4_809_412, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_353_000, 0) + // Minimum execution time: 6_683_000 picoseconds. + Weight::from_parts(7_047_000, 0) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_840_000 picoseconds. - Weight::from_parts(17_748_474, 0) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(4_630_079, 0).saturating_mul(c.into())) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 4_982_000 picoseconds. + Weight::from_parts(14_474_780, 3997) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(4_632_643, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index ddc7db8fa61..d91c47ae195 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_vesting +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_vesting -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/vesting/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -75,12 +77,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 32_846_000 picoseconds. - Weight::from_parts(30_974_459, 4764) - // Standard Error: 1_755 - .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) - // Standard Error: 3_123 - .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) + // Minimum execution time: 31_890_000 picoseconds. + Weight::from_parts(31_128_476, 4764) + // Standard Error: 1_193 + .saturating_add(Weight::from_parts(63_760, 0).saturating_mul(l.into())) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(57_247, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -96,12 +98,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 34_360_000 picoseconds. - Weight::from_parts(34_964_080, 4764) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) - // Standard Error: 3_552 - .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) + // Minimum execution time: 33_958_000 picoseconds. + Weight::from_parts(33_537_005, 4764) + // Standard Error: 1_354 + .saturating_add(Weight::from_parts(60_604, 0).saturating_mul(l.into())) + // Standard Error: 2_410 + .saturating_add(Weight::from_parts(51_378, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -119,12 +121,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 33_859_000 picoseconds. - Weight::from_parts(32_950_681, 4764) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) + // Minimum execution time: 33_243_000 picoseconds. + Weight::from_parts(32_552_805, 4764) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(67_137, 0).saturating_mul(l.into())) + // Standard Error: 2_515 + .saturating_add(Weight::from_parts(70_000, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -142,12 +144,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `484 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 36_239_000 picoseconds. 
- Weight::from_parts(36_459_756, 4764) - // Standard Error: 2_051 - .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) - // Standard Error: 3_650 - .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) + // Minimum execution time: 35_396_000 picoseconds. + Weight::from_parts(35_774_949, 4764) + // Standard Error: 1_404 + .saturating_add(Weight::from_parts(55_349, 0).saturating_mul(l.into())) + // Standard Error: 2_497 + .saturating_add(Weight::from_parts(39_228, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -165,12 +167,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 70_330_000 picoseconds. - Weight::from_parts(71_196_328, 4764) - // Standard Error: 2_923 - .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) - // Standard Error: 5_201 - .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) + // Minimum execution time: 68_441_000 picoseconds. + Weight::from_parts(68_961_970, 4764) + // Standard Error: 2_509 + .saturating_add(Weight::from_parts(83_345, 0).saturating_mul(l.into())) + // Standard Error: 4_464 + .saturating_add(Weight::from_parts(124_158, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -188,12 +190,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `658 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `6196` - // Minimum execution time: 70_235_000 picoseconds. - Weight::from_parts(71_960_020, 6196) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) - // Standard Error: 4_436 - .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) + // Minimum execution time: 70_190_000 picoseconds. + Weight::from_parts(72_355_984, 6196) + // Standard Error: 2_295 + .saturating_add(Weight::from_parts(59_968, 0).saturating_mul(l.into())) + // Standard Error: 4_084 + .saturating_add(Weight::from_parts(87_090, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -211,12 +213,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 34_352_000 picoseconds. - Weight::from_parts(33_697_027, 4764) - // Standard Error: 2_008 - .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) - // Standard Error: 3_710 - .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) + // Minimum execution time: 33_911_000 picoseconds. + Weight::from_parts(33_243_006, 4764) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(70_586, 0).saturating_mul(l.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(60_985, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -234,12 +236,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `482 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 37_467_000 picoseconds. 
- Weight::from_parts(36_866_847, 4764) - // Standard Error: 1_692 - .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) - // Standard Error: 3_124 - .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + // Minimum execution time: 36_242_000 picoseconds. + Weight::from_parts(35_812_994, 4764) + // Standard Error: 1_351 + .saturating_add(Weight::from_parts(68_256, 0).saturating_mul(l.into())) + // Standard Error: 2_496 + .saturating_add(Weight::from_parts(66_171, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -257,12 +259,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + // Minimum execution time: 37_817_000 picoseconds. + Weight::from_parts(37_397_127, 4764) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(72_049, 0).saturating_mul(l.into())) + // Standard Error: 3_075 + .saturating_add(Weight::from_parts(60_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -282,12 +284,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 32_846_000 picoseconds. - Weight::from_parts(30_974_459, 4764) - // Standard Error: 1_755 - .saturating_add(Weight::from_parts(73_138, 0).saturating_mul(l.into())) - // Standard Error: 3_123 - .saturating_add(Weight::from_parts(82_417, 0).saturating_mul(s.into())) + // Minimum execution time: 31_890_000 picoseconds. + Weight::from_parts(31_128_476, 4764) + // Standard Error: 1_193 + .saturating_add(Weight::from_parts(63_760, 0).saturating_mul(l.into())) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(57_247, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -303,12 +305,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 34_360_000 picoseconds. - Weight::from_parts(34_964_080, 4764) - // Standard Error: 1_996 - .saturating_add(Weight::from_parts(48_024, 0).saturating_mul(l.into())) - // Standard Error: 3_552 - .saturating_add(Weight::from_parts(34_411, 0).saturating_mul(s.into())) + // Minimum execution time: 33_958_000 picoseconds. + Weight::from_parts(33_537_005, 4764) + // Standard Error: 1_354 + .saturating_add(Weight::from_parts(60_604, 0).saturating_mul(l.into())) + // Standard Error: 2_410 + .saturating_add(Weight::from_parts(51_378, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -326,12 +328,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 33_859_000 picoseconds. 
- Weight::from_parts(32_950_681, 4764) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(69_323, 0).saturating_mul(l.into())) - // Standard Error: 3_105 - .saturating_add(Weight::from_parts(86_370, 0).saturating_mul(s.into())) + // Minimum execution time: 33_243_000 picoseconds. + Weight::from_parts(32_552_805, 4764) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(67_137, 0).saturating_mul(l.into())) + // Standard Error: 2_515 + .saturating_add(Weight::from_parts(70_000, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -349,12 +351,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `484 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 36_239_000 picoseconds. - Weight::from_parts(36_459_756, 4764) - // Standard Error: 2_051 - .saturating_add(Weight::from_parts(56_670, 0).saturating_mul(l.into())) - // Standard Error: 3_650 - .saturating_add(Weight::from_parts(49_663, 0).saturating_mul(s.into())) + // Minimum execution time: 35_396_000 picoseconds. + Weight::from_parts(35_774_949, 4764) + // Standard Error: 1_404 + .saturating_add(Weight::from_parts(55_349, 0).saturating_mul(l.into())) + // Standard Error: 2_497 + .saturating_add(Weight::from_parts(39_228, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -372,12 +374,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 70_330_000 picoseconds. - Weight::from_parts(71_196_328, 4764) - // Standard Error: 2_923 - .saturating_add(Weight::from_parts(67_238, 0).saturating_mul(l.into())) - // Standard Error: 5_201 - .saturating_add(Weight::from_parts(89_102, 0).saturating_mul(s.into())) + // Minimum execution time: 68_441_000 picoseconds. + Weight::from_parts(68_961_970, 4764) + // Standard Error: 2_509 + .saturating_add(Weight::from_parts(83_345, 0).saturating_mul(l.into())) + // Standard Error: 4_464 + .saturating_add(Weight::from_parts(124_158, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -395,12 +397,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `658 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `6196` - // Minimum execution time: 70_235_000 picoseconds. - Weight::from_parts(71_960_020, 6196) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(64_835, 0).saturating_mul(l.into())) - // Standard Error: 4_436 - .saturating_add(Weight::from_parts(102_159, 0).saturating_mul(s.into())) + // Minimum execution time: 70_190_000 picoseconds. + Weight::from_parts(72_355_984, 6196) + // Standard Error: 2_295 + .saturating_add(Weight::from_parts(59_968, 0).saturating_mul(l.into())) + // Standard Error: 4_084 + .saturating_add(Weight::from_parts(87_090, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -418,12 +420,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 34_352_000 picoseconds. 
- Weight::from_parts(33_697_027, 4764) - // Standard Error: 2_008 - .saturating_add(Weight::from_parts(79_270, 0).saturating_mul(l.into())) - // Standard Error: 3_710 - .saturating_add(Weight::from_parts(60_691, 0).saturating_mul(s.into())) + // Minimum execution time: 33_911_000 picoseconds. + Weight::from_parts(33_243_006, 4764) + // Standard Error: 1_199 + .saturating_add(Weight::from_parts(70_586, 0).saturating_mul(l.into())) + // Standard Error: 2_214 + .saturating_add(Weight::from_parts(60_985, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -441,12 +443,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `482 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 37_467_000 picoseconds. - Weight::from_parts(36_866_847, 4764) - // Standard Error: 1_692 - .saturating_add(Weight::from_parts(57_882, 0).saturating_mul(l.into())) - // Standard Error: 3_124 - .saturating_add(Weight::from_parts(80_266, 0).saturating_mul(s.into())) + // Minimum execution time: 36_242_000 picoseconds. + Weight::from_parts(35_812_994, 4764) + // Standard Error: 1_351 + .saturating_add(Weight::from_parts(68_256, 0).saturating_mul(l.into())) + // Standard Error: 2_496 + .saturating_add(Weight::from_parts(66_171, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -464,12 +466,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555 + l * (25 Β±0) + s * (36 Β±0)` // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + // Minimum execution time: 37_817_000 picoseconds. + Weight::from_parts(37_397_127, 4764) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(72_049, 0).saturating_mul(l.into())) + // Standard Error: 3_075 + .saturating_add(Weight::from_parts(60_973, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/whitelist/src/weights.rs b/substrate/frame/whitelist/src/weights.rs index de42c5a5841..13b653f6ca7 100644 --- a/substrate/frame/whitelist/src/weights.rs +++ b/substrate/frame/whitelist/src/weights.rs @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_whitelist +//! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// ./target/production/substrate-node // benchmark // pallet // --chain=dev @@ -35,12 +35,11 @@ // --no-median-slopes // --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/whitelist/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --output=./substrate/frame/whitelist/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +49,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_whitelist. +/// Weight functions needed for `pallet_whitelist`. pub trait WeightInfo { fn whitelist_call() -> Weight; fn remove_whitelisted_call() -> Weight; @@ -58,133 +57,149 @@ pub trait WeightInfo { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight; } -/// Weights for pallet_whitelist using the Substrate node and recommended hardware. +/// Weights for `pallet_whitelist` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_892_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 18_503_000 picoseconds. 
+ Weight::from_parts(19_497_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_529_000, 3556) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 17_633_000 picoseconds. + Weight::from_parts(18_196_000, 3556) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `422 + n * (1 Β±0)` - // Estimated: `3886 + n * (1 Β±0)` - // Minimum execution time: 30_671_000 picoseconds. - Weight::from_parts(31_197_000, 3886) + // Measured: `522 + n * (1 Β±0)` + // Estimated: `3986 + n * (1 Β±0)` + // Minimum execution time: 28_020_000 picoseconds. 
+ Weight::from_parts(28_991_000, 3986) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1_171, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_099_000 picoseconds. - Weight::from_parts(23_145_477, 3556) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 21_916_000 picoseconds. + Weight::from_parts(23_082_065, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `217` + // Measured: `317` // Estimated: `3556` - // Minimum execution time: 19_914_000 picoseconds. - Weight::from_parts(20_892_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 18_503_000 picoseconds. 
+ Weight::from_parts(19_497_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_529_000, 3556) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 17_633_000 picoseconds. + Weight::from_parts(18_196_000, 3556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `422 + n * (1 Β±0)` - // Estimated: `3886 + n * (1 Β±0)` - // Minimum execution time: 30_671_000 picoseconds. - Weight::from_parts(31_197_000, 3886) + // Measured: `522 + n * (1 Β±0)` + // Estimated: `3986 + n * (1 Β±0)` + // Minimum execution time: 28_020_000 picoseconds. 
+ Weight::from_parts(28_991_000, 3986) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_163, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1_171, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: Whitelist WhitelistedCall (r:1 w:1) - /// Proof: Whitelist WhitelistedCall (max_values: None, max_size: Some(40), added: 2515, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `346` + // Measured: `446` // Estimated: `3556` - // Minimum execution time: 22_099_000 picoseconds. - Weight::from_parts(23_145_477, 3556) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 21_916_000 picoseconds. + Weight::from_parts(23_082_065, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/primitives/inherents/src/lib.rs b/substrate/primitives/inherents/src/lib.rs index dd7c294f1e2..415319a6849 100644 --- a/substrate/primitives/inherents/src/lib.rs +++ b/substrate/primitives/inherents/src/lib.rs @@ -98,10 +98,10 @@ //! and production. //! //! ``` -//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_runtime::{generic::UncheckedExtrinsic, testing::MockCallU64}; //! # use sp_inherents::{InherentIdentifier, InherentData}; //! # use futures::FutureExt; -//! # type Block = sp_runtime::testing::Block>; +//! # type Block = sp_runtime::testing::Block>; //! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; //! # struct InherentDataProvider; //! 
# #[async_trait::async_trait] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index edfa58f8618..66aff2c599f 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -84,7 +84,7 @@ mod test { call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), extra_ty: meta_type::<()>(), - signed_extensions: vec![], + extensions: vec![], }, ty: meta_type::<()>(), apis: vec![], diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index b107d20a8e2..e60881ed6b8 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -166,10 +166,11 @@ pub struct ExtrinsicMetadataIR { pub call_ty: T::Type, /// The type of the extrinsic's signature. pub signature_ty: T::Type, - /// The type of the outermost Extra enum. + /// The type of the outermost Extra/Extensions enum. + // TODO: metadata-v16: rename this to `extension_ty`. pub extra_ty: T::Type, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec>, + /// The transaction extensions in the order they appear in the extrinsic. + pub extensions: Vec>, } impl IntoPortable for ExtrinsicMetadataIR { @@ -183,27 +184,27 @@ impl IntoPortable for ExtrinsicMetadataIR { call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), extra_ty: registry.register_type(&self.extra_ty), - signed_extensions: registry.map_into_portable(self.signed_extensions), + extensions: registry.map_into_portable(self.extensions), } } } /// Metadata of an extrinsic's signed extension. #[derive(Clone, PartialEq, Eq, Encode, Debug)] -pub struct SignedExtensionMetadataIR { +pub struct TransactionExtensionMetadataIR { /// The unique signed extension identifier, which may be different from the type name. pub identifier: T::String, /// The type of the signed extension, with the data to be included in the extrinsic. pub ty: T::Type, - /// The type of the additional signed data, with the data to be included in the signed payload + /// The type of the additional signed data, with the data to be included in the signed payload. 
pub additional_signed: T::Type, } -impl IntoPortable for SignedExtensionMetadataIR { - type Output = SignedExtensionMetadataIR; +impl IntoPortable for TransactionExtensionMetadataIR { + type Output = TransactionExtensionMetadataIR; fn into_portable(self, registry: &mut Registry) -> Self::Output { - SignedExtensionMetadataIR { + TransactionExtensionMetadataIR { identifier: self.identifier.into_portable(registry), ty: registry.register_type(&self.ty), additional_signed: registry.register_type(&self.additional_signed), diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index e1b7a24f765..ec08de34786 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -20,8 +20,8 @@ use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, - SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, - StorageHasherIR, + StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR, + TransactionExtensionMetadataIR, }; use frame_metadata::v14::{ @@ -137,8 +137,8 @@ impl From for PalletErrorMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, @@ -152,7 +152,7 @@ impl From for ExtrinsicMetadata { ExtrinsicMetadata { ty: ir.ty, version: ir.version, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index a942eb73223..dc5ce1c88fd 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -21,7 +21,7 @@ use crate::OuterEnumsIR; use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, - RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, SignedExtensionMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, }; use frame_metadata::v15::{ @@ -87,8 +87,8 @@ impl From for PalletMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, @@ -105,7 +105,7 @@ impl From for ExtrinsicMetadata { call_ty: ir.call_ty, signature_ty: ir.signature_ty, extra_ty: ir.extra_ty, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index cacfc059722..758cd8944fd 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,6 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +tuplex = { version = "0.1.2", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } either = { version = "1.5", default-features = 
false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -67,6 +68,7 @@ std = [ "sp-std/std", "sp-tracing/std", "sp-weights/std", + "tuplex/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 44325920bee..e28c8fe424a 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,81 +18,115 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. +use codec::Encode; + use crate::{ traits::{ - self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, - SignedExtension, ValidateUnsigned, + self, transaction_extension::TransactionExtension, DispatchInfoOf, DispatchTransaction, + Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity}, }; +/// The kind of extrinsic this is, including any fields required of that kind. This is basically +/// the full extrinsic except the `Call`. +#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] +pub enum ExtrinsicFormat { + /// Extrinsic is bare; it must pass either the bare forms of `TransactionExtension` or + /// `ValidateUnsigned`, both deprecated, or alternatively a `ProvideInherent`. + Bare, + /// Extrinsic has a default `Origin` of `Signed(AccountId)` and must pass all + /// `TransactionExtension`s regular checks and includes all extension data. + Signed(AccountId, Extension), + /// Extrinsic has a default `Origin` of `None` and must pass all `TransactionExtension`s. + /// regular checks and includes all extension data. + General(Extension), +} + +// TODO: Rename ValidateUnsigned to ValidateInherent +// TODO: Consider changing ValidateInherent API to avoid need for duplicating validate +// code into pre_dispatch (rename that to `prepare`). +// TODO: New extrinsic type corresponding to `ExtrinsicFormat::General`, which is +// unsigned but includes extension data. +// TODO: Move usage of `signed` to `format`: +// - Inherent instead of None. +// - Signed(id, extension) instead of Some((id, extra)). +// - Introduce General(extension) for one without a signature. + /// Definition of something that the external world might want to say; its existence implies that it /// has been checked and is good, particularly with regards to the signature. /// /// This is typically passed into [`traits::Applyable::apply`], which should execute /// [`CheckedExtrinsic::function`], alongside all other bits and bobs. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] -pub struct CheckedExtrinsic { +pub struct CheckedExtrinsic { /// Who this purports to be from and the number of extrinsics have come before /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Extra)>, + pub format: ExtrinsicFormat, /// The function that should be called. pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable + for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, + Call: Member + Dispatchable + Encode, + Extension: TransactionExtension, RuntimeOrigin: From>, { type Call = Call; - fn validate>( + fn validate>( &self, - // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? - // Perhaps a change for 2.0 to avoid breaking too much APIs? 
source: TransactionSource, info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info, len) - } else { - let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.function)?; - Ok(valid.combine_with(unsigned_validation)) + match self.format { + ExtrinsicFormat::Bare => { + let inherent_validation = I::validate_unsigned(source, &self.function)?; + #[allow(deprecated)] + let legacy_validation = Extension::validate_bare_compat(&self.function, info, len)?; + Ok(legacy_validation.combine_with(inherent_validation)) + }, + ExtrinsicFormat::Signed(ref signer, ref extension) => { + let origin = Some(signer.clone()).into(); + extension.validate_only(origin, &self.function, info, len).map(|x| x.0) + }, + ExtrinsicFormat::General(ref extension) => + extension.validate_only(None.into(), &self.function, info, len).map(|x| x.0), } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResultWithInfo> { - let (maybe_who, maybe_pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; - (Some(id), Some(pre)) - } else { - Extra::pre_dispatch_unsigned(&self.function, info, len)?; - U::pre_dispatch(&self.function)?; - (None, None) - }; - let res = self.function.dispatch(RuntimeOrigin::from(maybe_who)); - let post_info = match res { - Ok(info) => info, - Err(err) => err.post_info, - }; - Extra::post_dispatch( - maybe_pre, - info, - &post_info, - len, - &res.map(|_| ()).map_err(|e| e.error), - )?; - Ok(res) + match self.format { + ExtrinsicFormat::Bare => { + I::pre_dispatch(&self.function)?; + // TODO: Remove below once `pre_dispatch_unsigned` is removed from `LegacyExtension` + // or `LegacyExtension` is removed. + #[allow(deprecated)] + Extension::validate_bare_compat(&self.function, info, len)?; + #[allow(deprecated)] + Extension::pre_dispatch_bare_compat(&self.function, info, len)?; + let res = self.function.dispatch(None.into()); + let post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + // TODO: Remove below once `pre_dispatch_unsigned` is removed from `LegacyExtension` + // or `LegacyExtension` is removed. 
+ #[allow(deprecated)] + Extension::post_dispatch_bare_compat(info, &post_info, len, &pd_res)?; + Ok(res) + }, + ExtrinsicFormat::Signed(signer, extension) => + extension.dispatch_transaction(Some(signer).into(), self.function, info, len), + ExtrinsicFormat::General(extension) => + extension.dispatch_transaction(None.into(), self.function, info, len), + } } } diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index 3687f7cdb3b..5713c166b04 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -29,9 +29,9 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, - checked_extrinsic::CheckedExtrinsic, + checked_extrinsic::{CheckedExtrinsic, ExtrinsicFormat}, digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, - unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, + unchecked_extrinsic::{Preamble, SignedPayload, UncheckedExtrinsic}, }; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5b54caf597b..44dfda13d94 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -18,10 +18,11 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. use crate::{ - generic::CheckedExtrinsic, + generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, - SignaturePayload, SignedExtension, + self, transaction_extension::TransactionExtensionBase, Checkable, Dispatchable, Extrinsic, + ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, + TransactionExtension, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -40,8 +41,60 @@ use sp_std::{fmt, prelude::*}; /// the decoding fails. const EXTRINSIC_FORMAT_VERSION: u8 = 4; -/// The `SingaturePayload` of `UncheckedExtrinsic`. -type UncheckedSignaturePayload = (Address, Signature, Extra); +/// The `SignaturePayload` of `UncheckedExtrinsic`. +type UncheckedSignaturePayload = (Address, Signature, Extension); + +impl SignaturePayload + for UncheckedSignaturePayload +{ + type SignatureAddress = Address; + type Signature = Signature; + type SignatureExtra = Extension; +} + +/// A "header" for extrinsics leading up to the call itself. Determines the type of extrinsic and +/// holds any necessary specialized data. +#[derive(Eq, PartialEq, Clone, Encode, Decode)] +pub enum Preamble { + /// An extrinsic without a signature or any extension. This means it's either an inherent or + /// an old-school "Unsigned" (we don't use that terminology any more since it's confusable with + /// the general transaction which is without a signature but does have an extension). + /// + /// NOTE: In the future, once we remove `ValidateUnsigned`, this will only serve Inherent + /// extrinsics and thus can be renamed to `Inherent`. + #[codec(index = 0b00000100)] + Bare, + /// An old-school transaction extrinsic which includes a signature of some hard-coded crypto. + #[codec(index = 0b10000100)] + Signed(Address, Signature, Extension), + /// A new-school transaction extrinsic which does not include a signature. 
+ #[codec(index = 0b01000100)] + General(Extension), +} + +impl Preamble { + /// Returns `Some` if this is a signed extrinsic, together with the relevant inner fields. + pub fn to_signed(self) -> Option<(Address, Signature, Extension)> { + match self { + Self::Signed(a, s, e) => Some((a, s, e)), + _ => None, + } + } +} + +impl fmt::Debug for Preamble +where + Address: fmt::Debug, + Extension: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Bare => write!(f, "Bare"), + Self::Signed(address, _, tx_ext) => write!(f, "Signed({:?}, {:?})", address, tx_ext), + Self::General(tx_ext) => write!(f, "General({:?})", tx_ext), + } + } +} /// An extrinsic right from the external world. This is unchecked and so can contain a signature. /// @@ -65,41 +118,28 @@ type UncheckedSignaturePayload = (Address, Signature, /// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the /// counterpart of this type after its signature (and other non-negotiable validity checks) have /// passed. -#[derive(PartialEq, Eq, Clone)] -pub struct UncheckedExtrinsic -where - Extra: SignedExtension, -{ - /// The signature, address, number of extrinsics have come before from the same signer and an - /// era describing the longevity of this transaction, if this is a signed extrinsic. - /// - /// `None` if it is unsigned or an inherent. - pub signature: Option>, +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct UncheckedExtrinsic { + /// Information regarding the type of extrinsic this is (inherent or transaction) as well as + /// associated extension (`Extension`) data if it's a transaction and a possible signature. + pub preamble: Preamble, /// The function that should be called. pub function: Call, } -impl SignaturePayload - for UncheckedSignaturePayload -{ - type SignatureAddress = Address; - type Signature = Signature; - type SignatureExtra = Extra; -} - /// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded /// `Vec`, but requires some logic to extract the signature and payload. /// /// See [`UncheckedExtrinsic::encode`] and [`UncheckedExtrinsic::decode`]. -impl TypeInfo - for UncheckedExtrinsic +impl TypeInfo + for UncheckedExtrinsic where Address: StaticTypeInfo, Call: StaticTypeInfo, Signature: StaticTypeInfo, - Extra: SignedExtension + StaticTypeInfo, + Extension: StaticTypeInfo, { - type Identity = UncheckedExtrinsic; + type Identity = UncheckedExtrinsic; fn type_info() -> Type { Type::builder() @@ -111,7 +151,7 @@ where TypeParameter::new("Address", Some(meta_type::
())), TypeParameter::new("Call", Some(meta_type::())),
 TypeParameter::new("Signature", Some(meta_type::())),
- TypeParameter::new("Extra", Some(meta_type::())),
+ TypeParameter::new("Extra", Some(meta_type::())),
 ])
 .docs(&["UncheckedExtrinsic raw bytes, requires custom decoding routine"])
 // Because of the custom encoding, we can only accurately describe the encoding as an
@@ -121,66 +161,113 @@ where
 }
 }
-impl
- UncheckedExtrinsic
-{
- /// New instance of a signed extrinsic aka "transaction".
- pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self {
- Self { signature: Some((signed, signature, extra)), function }
+impl UncheckedExtrinsic {
+ /// New instance of a bare (né unsigned) extrinsic. This could be used for an inherent or an
+ /// old-school "unsigned transaction" (which are now being deprecated in favour of general
+ /// transactions).
+ #[deprecated = "Use new_bare instead"]
+ pub fn new_unsigned(function: Call) -> Self {
+ Self::new_bare(function)
 }
- /// New instance of an unsigned extrinsic aka "inherent".
- pub fn new_unsigned(function: Call) -> Self {
- Self { signature: None, function }
+ /// Returns `true` if this extrinsic instance is an inherent, `false` otherwise.
+ pub fn is_inherent(&self) -> bool {
+ matches!(self.preamble, Preamble::Bare)
+ }
+
+ /// Returns `true` if this extrinsic instance is an old-school signed transaction, `false`
+ /// otherwise.
+ pub fn is_signed(&self) -> bool {
+ matches!(self.preamble, Preamble::Signed(..))
+ }
+
+ /// Create an `UncheckedExtrinsic` from a `Preamble` and the actual `Call`.
+ pub fn from_parts(function: Call, preamble: Preamble) -> Self {
+ Self { preamble, function }
+ }
+
+ /// New instance of a bare (né unsigned) extrinsic.
+ pub fn new_bare(function: Call) -> Self {
+ Self { preamble: Preamble::Bare, function }
+ }
+
+ /// New instance of an old-school signed transaction.
+ pub fn new_signed(
+ function: Call,
+ signed: Address,
+ signature: Signature,
+ tx_ext: Extension,
+ ) -> Self {
+ Self { preamble: Preamble::Signed(signed, signature, tx_ext), function }
+ }
+
+ /// New instance of a new-school unsigned transaction.
+ pub fn new_transaction(function: Call, tx_ext: Extension) -> Self {
+ Self { preamble: Preamble::General(tx_ext), function }
+ }
 }
-impl
- Extrinsic for UncheckedExtrinsic
+// TODO: We can get rid of this trait and just use UncheckedExtrinsic directly.
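// For orientation only (an illustrative sketch, not part of this change set): how the three
// constructors above map onto the `Preamble` variants. It reuses the helpers from the test module
// at the bottom of this file (`Ex`, `TestCall`, `TestSig`, `DummyExtension`, `TEST_ACCOUNT`);
// `demo_preamble_variants` is an assumed name.
fn demo_preamble_variants() {
    use codec::Encode;

    let call: TestCall = vec![0u8; 0].into();

    // `Preamble::Bare`: an inherent (or legacy "unsigned") extrinsic.
    let bare = Ex::new_bare(call.clone());
    assert!(bare.is_inherent());

    // `Preamble::Signed`: an old-school signed transaction.
    let signed = Ex::new_signed(
        call.clone(),
        TEST_ACCOUNT,
        TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()),
        DummyExtension,
    );
    assert!(signed.is_signed());

    // `Preamble::General`: a new-school transaction carrying extension data but no signature.
    let general = Ex::new_transaction(call, DummyExtension);
    assert!(!general.is_inherent() && !general.is_signed());
}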
+ +impl Extrinsic + for UncheckedExtrinsic { type Call = Call; - type SignaturePayload = UncheckedSignaturePayload; + type SignaturePayload = UncheckedSignaturePayload; + + fn is_bare(&self) -> bool { + matches!(self.preamble, Preamble::Bare) + } fn is_signed(&self) -> Option { - Some(self.signature.is_some()) + Some(matches!(self.preamble, Preamble::Signed(..))) } fn new(function: Call, signed_data: Option) -> Option { Some(if let Some((address, signature, extra)) = signed_data { Self::new_signed(function, address, signature, extra) } else { - Self::new_unsigned(function) + Self::new_bare(function) }) } + + fn new_inherent(function: Call) -> Self { + Self::new_bare(function) + } } -impl Checkable - for UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where LookupSource: Member + MaybeDisplay, - Call: Encode + Member, + Call: Encode + Member + Dispatchable, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, - Extra: SignedExtension, + Extension: Encode + TransactionExtension, AccountId: Member + MaybeDisplay, Lookup: traits::Lookup, { - type Checked = CheckedExtrinsic; + type Checked = CheckedExtrinsic; fn check(self, lookup: &Lookup) -> Result { - Ok(match self.signature { - Some((signed, signature, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, signature, tx_ext) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; + // CHECK! Should this not contain implicit? + let raw_payload = SignedPayload::new(self.function, tx_ext)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { return Err(InvalidTransaction::BadProof.into()) } - - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + let (function, tx_ext, _) = raw_payload.deconstruct(); + CheckedExtrinsic { format: ExtrinsicFormat::Signed(signed, tx_ext), function } + }, + Preamble::General(tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(tx_ext), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } @@ -189,91 +276,38 @@ where self, lookup: &Lookup, ) -> Result { - Ok(match self.signature { - Some((signed, _, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, _, extra) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signed, extra), + function: self.function, + } }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::General(extra) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extra), + function: self.function, + }, + Preamble::Bare => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } } -impl ExtrinsicMetadata - for UncheckedExtrinsic -where - Extra: SignedExtension, +impl> + ExtrinsicMetadata for UncheckedExtrinsic { const VERSION: u8 = EXTRINSIC_FORMAT_VERSION; - type SignedExtensions = Extra; -} - -/// A payload that has been signed for an unchecked extrinsics. -/// -/// Note that the payload that we sign to produce unchecked extrinsic signature -/// is going to be different than the `SignaturePayload` - so the thing the extrinsic -/// actually contains. 
-pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); - -impl SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Create new `SignedPayload`. - /// - /// This function may fail if `additional_signed` of `Extra` is not available. - pub fn new(call: Call, extra: Extra) -> Result { - let additional_signed = extra.additional_signed()?; - let raw_payload = (call, extra, additional_signed); - Ok(Self(raw_payload)) - } - - /// Create new `SignedPayload` from raw components. - pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { - Self((call, extra, additional_signed)) - } - - /// Deconstruct the payload into it's components. - pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { - self.0 - } + type Extra = Extension; } -impl Encode for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Get an encoded version of this payload. - /// - /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. - fn using_encoded R>(&self, f: F) -> R { - self.0.using_encoded(|payload| { - if payload.len() > 256 { - f(&blake2_256(payload)[..]) - } else { - f(payload) - } - }) - } -} - -impl EncodeLike for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ -} - -impl Decode for UncheckedExtrinsic +impl Decode + for UncheckedExtrinsic where Address: Decode, Signature: Decode, Call: Decode, - Extra: SignedExtension, + Extension: Decode, { fn decode(input: &mut I) -> Result { // This is a little more complicated than usual since the binary format must be compatible @@ -282,15 +316,7 @@ where let expected_length: Compact = Decode::decode(input)?; let before_length = input.remaining_len()?; - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()) - } - - let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let preamble = Decode::decode(input)?; let function = Decode::decode(input)?; if let Some((before_length, after_length)) = @@ -303,31 +329,20 @@ where } } - Ok(Self { signature, function }) + Ok(Self { preamble, function }) } } #[docify::export(unchecked_extrinsic_encode_impl)] -impl Encode for UncheckedExtrinsic +impl Encode + for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, + Preamble: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { fn encode(&self) -> Vec { - let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); - - // 1 byte version id. 
- match self.signature.as_ref() { - Some(s) => { - tmp.push(EXTRINSIC_FORMAT_VERSION | 0b1000_0000); - s.encode_to(&mut tmp); - }, - None => { - tmp.push(EXTRINSIC_FORMAT_VERSION & 0b0111_1111); - }, - } + let mut tmp = self.preamble.encode(); self.function.encode_to(&mut tmp); let compact_len = codec::Compact::(tmp.len() as u32); @@ -342,19 +357,19 @@ where } } -impl EncodeLike - for UncheckedExtrinsic +impl EncodeLike + for UncheckedExtrinsic where Address: Encode, Signature: Encode, - Call: Encode, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { } #[cfg(feature = "serde")] -impl serde::Serialize - for UncheckedExtrinsic +impl serde::Serialize + for UncheckedExtrinsic { fn serialize(&self, seq: S) -> Result where @@ -365,45 +380,88 @@ impl s } #[cfg(feature = "serde")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> - serde::Deserialize<'a> for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extension: Decode> serde::Deserialize<'a> + for UncheckedExtrinsic { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) + Self::decode(&mut &r[..]) .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) } } -impl fmt::Debug - for UncheckedExtrinsic +/// A payload that has been signed for an unchecked extrinsics. +/// +/// Note that the payload that we sign to produce unchecked extrinsic signature +/// is going to be different than the `SignaturePayload` - so the thing the extrinsic +/// actually contains. +pub struct SignedPayload( + (Call, Extension, Extension::Implicit), +); + +impl SignedPayload where - Address: fmt::Debug, - Call: fmt::Debug, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "UncheckedExtrinsic({:?}, {:?})", - self.signature.as_ref().map(|x| (&x.0, &x.2)), - self.function, - ) + /// Create new `SignedPayload`. + /// + /// This function may fail if `implicit` of `Extension` is not available. + pub fn new(call: Call, tx_ext: Extension) -> Result { + let implicit = Extension::implicit(&tx_ext)?; + let raw_payload = (call, tx_ext, implicit); + Ok(Self(raw_payload)) + } + + /// Create new `SignedPayload` from raw components. + pub fn from_raw(call: Call, tx_ext: Extension, implicit: Extension::Implicit) -> Self { + Self((call, tx_ext, implicit)) + } + + /// Deconstruct the payload into it's components. + pub fn deconstruct(self) -> (Call, Extension, Extension::Implicit) { + self.0 } } -impl From> - for OpaqueExtrinsic +impl Encode for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, +{ + /// Get an encoded version of this payload. + /// + /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. 
+ fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(|payload| { + if payload.len() > 256 { + f(&blake2_256(payload)[..]) + } else { + f(payload) + } + }) + } +} + +impl EncodeLike for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtensionBase, +{ +} + +impl + From> for OpaqueExtrinsic where Address: Encode, Signature: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { - fn from(extrinsic: UncheckedExtrinsic) -> Self { + fn from(extrinsic: UncheckedExtrinsic) -> Self { Self::from_bytes(extrinsic.encode().as_slice()).expect( "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ raw Vec encoding; qed", @@ -411,60 +469,198 @@ where } } +#[cfg(test)] +mod legacy { + use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; + use scale_info::{ + build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter, + }; + + pub type OldUncheckedSignaturePayload = (Address, Signature, Extra); + + #[derive(PartialEq, Eq, Clone, Debug)] + pub struct OldUncheckedExtrinsic { + pub signature: Option>, + pub function: Call, + } + + impl TypeInfo + for OldUncheckedExtrinsic + where + Address: StaticTypeInfo, + Call: StaticTypeInfo, + Signature: StaticTypeInfo, + Extra: StaticTypeInfo, + { + type Identity = OldUncheckedExtrinsic; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("UncheckedExtrinsic", module_path!())) + // Include the type parameter types, even though they are not used directly in any + // of the described fields. These type definitions can be used by downstream + // consumers to help construct the custom decoding from the opaque bytes (see + // below). + .type_params(vec![ + TypeParameter::new("Address", Some(meta_type::
())), + TypeParameter::new("Call", Some(meta_type::())), + TypeParameter::new("Signature", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), + ]) + .docs(&["OldUncheckedExtrinsic raw bytes, requires custom decoding routine"]) + // Because of the custom encoding, we can only accurately describe the encoding as + // an opaque `Vec`. Downstream consumers will need to manually implement the + // codec to encode/decode the `signature` and `function` fields. + .composite(Fields::unnamed().field(|f| f.ty::>())) + } + } + + impl OldUncheckedExtrinsic { + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + extra: Extra, + ) -> Self { + Self { signature: Some((signed, signature, extra)), function } + } + + pub fn new_unsigned(function: Call) -> Self { + Self { signature: None, function } + } + } + + impl Decode + for OldUncheckedExtrinsic + where + Address: Decode, + Signature: Decode, + Call: Decode, + Extra: Decode, + { + fn decode(input: &mut I) -> Result { + // This is a little more complicated than usual since the binary format must be + // compatible with SCALE's generic `Vec` type. Basically this just means accepting + // that there will be a prefix of vector length. + let expected_length: Compact = Decode::decode(input)?; + let before_length = input.remaining_len()?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != 4u8 { + return Err("Invalid transaction version".into()) + } + + let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let function = Decode::decode(input)?; + + if let Some((before_length, after_length)) = + input.remaining_len()?.and_then(|a| before_length.map(|b| (b, a))) + { + let length = before_length.saturating_sub(after_length); + + if length != expected_length.0 as usize { + return Err("Invalid length prefix".into()) + } + } + + Ok(Self { signature, function }) + } + } + + #[docify::export(unchecked_extrinsic_encode_impl)] + impl Encode + for OldUncheckedExtrinsic + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + fn encode(&self) -> Vec { + let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + + // 1 byte version id. + match self.signature.as_ref() { + Some(s) => { + tmp.push(4u8 | 0b1000_0000); + s.encode_to(&mut tmp); + }, + None => { + tmp.push(4u8 & 0b0111_1111); + }, + } + self.function.encode_to(&mut tmp); + + let compact_len = codec::Compact::(tmp.len() as u32); + + // Allocate the output buffer with the correct length + let mut output = Vec::with_capacity(compact_len.size_hint() + tmp.len()); + + compact_len.encode_to(&mut output); + output.extend(tmp); + + output + } + } + + impl EncodeLike + for OldUncheckedExtrinsic + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + } +} + #[cfg(test)] mod tests { - use super::*; + use super::{legacy::OldUncheckedExtrinsic, *}; use crate::{ codec::{Decode, Encode}, + impl_tx_ext_default, testing::TestSignature as TestSig, - traits::{DispatchInfoOf, IdentityLookup, SignedExtension}, + traits::{FakeDispatchable, IdentityLookup, TransactionExtension}, }; use sp_io::hashing::blake2_256; type TestContext = IdentityLookup; type TestAccountId = u64; - type TestCall = Vec; + type TestCall = FakeDispatchable>; const TEST_ACCOUNT: TestAccountId = 0; // NOTE: this is demonstration. One can simply use `()` for testing. 
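// The `DummyExtension` defined below replaces the old `TestExtra: SignedExtension` helper. It
// implements the new `TransactionExtensionBase`/`TransactionExtension` pair with all associated
// types set to `()` and relies on `impl_tx_ext_default!` for the default `validate`/`prepare`
// bodies, i.e. it accepts every transaction and attaches no data of its own.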
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] - struct TestExtra; - impl SignedExtension for TestExtra { - const IDENTIFIER: &'static str = "TestExtra"; - type AccountId = u64; - type Call = (); - type AdditionalSigned = (); + struct DummyExtension; + impl TransactionExtensionBase for DummyExtension { + const IDENTIFIER: &'static str = "DummyExtension"; + type Implicit = (); + } + impl TransactionExtension for DummyExtension { + type Val = (); type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(TestCall; Context; validate prepare); } - type Ex = UncheckedExtrinsic; - type CEx = CheckedExtrinsic; + type Ex = UncheckedExtrinsic; + type CEx = CheckedExtrinsic; #[test] fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let call: TestCall = vec![0u8; 0].into(); + let ux = Ex::new_inherent(call); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); } #[test] fn invalid_length_prefix_is_detected() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_inherent(vec![0u8; 0].into()); let mut encoded = ux.encode(); let length = Compact::::decode(&mut &encoded[..]).unwrap(); @@ -473,13 +669,20 @@ mod tests { assert_eq!(Ex::decode(&mut &encoded[..]), Err("Invalid length prefix".into())); } + #[test] + fn transaction_codec_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + #[test] fn signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -488,13 +691,13 @@ mod tests { #[test] fn large_signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, TestSig( TEST_ACCOUNT, - (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + (vec![0u8; 257], DummyExtension).using_encoded(blake2_256)[..].to_owned(), ), - TestExtra, + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -502,44 +705,63 @@ mod tests { #[test] fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &Default::default()).is_ok()); + let ux = Ex::new_inherent(vec![0u8; 0].into()); + assert!(ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { format: ExtrinsicFormat::Bare, function: vec![0u8; 0].into() }), + ); } #[test] fn badly_signed_check_should_fail() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, vec![0u8; 0]), - TestExtra, + TestSig(TEST_ACCOUNT, vec![0u8; 0].into()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), Err(InvalidTransaction::BadProof.into()), ); } + #[test] + fn transaction_check_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + 
assert!(!ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { + format: ExtrinsicFormat::General(DummyExtension), + function: vec![0u8; 0].into() + }), + ); + } + #[test] fn signed_check_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), - Ok(CEx { signed: Some((TEST_ACCOUNT, TestExtra)), function: vec![0u8; 0] }), + Ok(CEx { + format: ExtrinsicFormat::Signed(TEST_ACCOUNT, DummyExtension), + function: vec![0u8; 0].into() + }), ); } #[test] fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8; 0]); + let ex = Ex::new_inherent(vec![0u8; 0].into()); let encoded = ex.encode(); let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(decoded, ex); @@ -549,7 +771,7 @@ mod tests { #[test] fn conversion_to_opaque() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_inherent(vec![0u8; 0].into()); let encoded = ux.encode(); let opaque: OpaqueExtrinsic = ux.into(); let opaque_encoded = opaque.encode(); @@ -558,10 +780,62 @@ mod tests { #[test] fn large_bad_prefix_should_work() { - let encoded = Compact::::from(u32::MAX).encode(); - assert_eq!( - Ex::decode(&mut &encoded[..]), - Err(Error::from("Not enough data to fill buffer")) - ); + let encoded = (Compact::::from(u32::MAX), Preamble::<(), (), ()>::Bare).encode(); + assert!(Ex::decode(&mut &encoded[..]).is_err()); + } + + #[test] + fn legacy_signed_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + let signed = TEST_ACCOUNT; + let signature = TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()); + let extension = DummyExtension; + + let new_ux = Ex::new_signed(call.clone(), signed, signature.clone(), extension.clone()); + let old_ux = + OldUncheckedExtrinsic::::new_signed( + call, signed, signature, extension, + ); + + let encoded_new_ux = new_ux.encode(); + let encoded_old_ux = old_ux.encode(); + + assert_eq!(encoded_new_ux, encoded_old_ux); + + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + let decoded_old_ux = + OldUncheckedExtrinsic::::decode( + &mut &encoded_old_ux[..], + ) + .unwrap(); + + assert_eq!(new_ux, decoded_new_ux); + assert_eq!(old_ux, decoded_old_ux); + } + + #[test] + fn legacy_unsigned_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + + let new_ux = Ex::new_bare(call.clone()); + let old_ux = + OldUncheckedExtrinsic::::new_unsigned( + call, + ); + + let encoded_new_ux = new_ux.encode(); + let encoded_old_ux = old_ux.encode(); + + assert_eq!(encoded_new_ux, encoded_old_ux); + + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + let decoded_old_ux = + OldUncheckedExtrinsic::::decode( + &mut &encoded_old_ux[..], + ) + .unwrap(); + + assert_eq!(new_ux, decoded_new_ux); + assert_eq!(old_ux, decoded_old_ux); } } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 44bf3c969e5..70605970b4e 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -125,6 +125,11 @@ pub use sp_arithmetic::{ Perquintill, Rational128, Rounding, UpperOf, }; +pub use transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidityError, UnknownTransaction, + ValidTransaction, +}; + pub use 
either::Either; /// The number of bytes of the module-specific `error` field defined in [`ModuleError`]. @@ -443,21 +448,21 @@ impl std::fmt::Display for MultiSigner { impl Verify for MultiSignature { type Signer = MultiSigner; fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { - match (self, signer) { - (Self::Ed25519(ref sig), who) => match ed25519::Public::from_slice(who.as_ref()) { + match self { + Self::Ed25519(ref sig) => match ed25519::Public::from_slice(signer.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Sr25519(ref sig), who) => match sr25519::Public::from_slice(who.as_ref()) { + Self::Sr25519(ref sig) => match sr25519::Public::from_slice(signer.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Ecdsa(ref sig), who) => { + Self::Ecdsa(ref sig) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { Ok(pubkey) => &sp_io::hashing::blake2_256(pubkey.as_ref()) == - >::as_ref(who), + >::as_ref(signer), _ => false, } }, @@ -944,9 +949,13 @@ impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { } } +// TODO: OpaqueExtrinsics cannot act like regular extrinsics, right?! impl traits::Extrinsic for OpaqueExtrinsic { type Call = (); type SignaturePayload = (); + fn is_bare(&self) -> bool { + false + } } /// Print something that implements `Printable` from the runtime. diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 5f94c834a8f..3d6e0b5ab8b 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -21,24 +21,16 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, generic, scale_info::TypeInfo, - traits::{ - self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, - PostDispatchInfoOf, SignaturePayload, SignedExtension, ValidateUnsigned, - }, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResultWithInfo, KeyTypeId, + traits::{self, BlakeTwo256, Dispatchable, OpaqueKeys}, + DispatchResultWithInfo, KeyTypeId, }; -use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; use sp_core::{ crypto::{key_types, ByteArray, CryptoType, Dummy}, U256, }; pub use sp_core::{sr25519, H256}; -use std::{ - cell::RefCell, - fmt::{self, Debug}, - ops::Deref, -}; +use std::{cell::RefCell, fmt::Debug}; /// A dummy type which can be used instead of regular cryptographic primitives. /// @@ -198,42 +190,6 @@ impl Header { } } -/// An opaque extrinsic wrapper type. 
-#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] -pub struct ExtrinsicWrapper(Xt); - -impl traits::Extrinsic for ExtrinsicWrapper { - type Call = (); - type SignaturePayload = (); - - fn is_signed(&self) -> Option { - None - } -} - -impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result - where - S: ::serde::Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } -} - -impl Deref for ExtrinsicWrapper { - type Target = Xt; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, TypeInfo)] pub struct Block { @@ -283,139 +239,22 @@ where } } -/// The signature payload of a `TestXt`. -type TxSingaturePayload = (u64, Extra); - -impl SignaturePayload for TxSingaturePayload { - type SignatureAddress = u64; - type Signature = (); - type SignatureExtra = Extra; -} - -/// Test transaction, tuple of (sender, call, signed_extra) -/// with index only used if sender is some. -/// -/// If sender is some then the transaction is signed otherwise it is unsigned. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -pub struct TestXt { - /// Signature of the extrinsic. - pub signature: Option>, - /// Call of the extrinsic. - pub call: Call, -} - -impl TestXt { - /// Create a new `TextXt`. - pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { - Self { call, signature } - } -} - -impl Serialize for TestXt -where - TestXt: Encode, -{ - fn serialize(&self, seq: S) -> Result - where - S: Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} +/// Wrapper over a `u64` that can be used as a `RuntimeCall`. +#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode, TypeInfo)] +pub struct MockCallU64(pub u64); -impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, ...)", self.signature.as_ref().map(|x| &x.0)) +impl Dispatchable for MockCallU64 { + type RuntimeOrigin = u64; + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) } } -impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { - Ok(self) - } - - #[cfg(feature = "try-runtime")] - fn unchecked_into_checked_i_know_what_i_am_doing( - self, - _: &Context, - ) -> Result { - unreachable!() - } -} - -impl traits::Extrinsic - for TestXt -{ - type Call = Call; - type SignaturePayload = TxSingaturePayload; - - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } - - fn new(c: Call, sig: Option) -> Option { - Some(TestXt { signature: sig, call: c }) - } -} - -impl traits::ExtrinsicMetadata for TestXt -where - Call: Codec + Sync + Send, - Extra: SignedExtension, -{ - type SignedExtensions = Extra; - const VERSION: u8 = 0u8; -} - -impl Applyable for TestXt -where - Call: 'static - + Sized - + Send - + Sync - + Clone - + Eq - + Codec - + Debug - + Dispatchable, - Extra: SignedExtension, - Origin: From>, -{ - type Call = Call; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. 
- fn validate>( - &self, - source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signature { - Extra::validate(extra, id, &self.call, info, len) - } else { - let valid = Extra::validate_unsigned(&self.call, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.call)?; - Ok(valid.combine_with(unsigned_validation)) - } - } - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> ApplyExtrinsicResultWithInfo> { - let maybe_who = if let Some((who, extra)) = self.signature { - Extra::pre_dispatch(extra, &who, &self.call, info, len)?; - Some(who) - } else { - Extra::pre_dispatch_unsigned(&self.call, info, len)?; - U::pre_dispatch(&self.call)?; - None - }; - - Ok(self.call.dispatch(maybe_who.into())) +impl From for MockCallU64 { + fn from(value: u64) -> Self { + Self(value) } } diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits/mod.rs similarity index 94% rename from substrate/primitives/runtime/src/traits.rs rename to substrate/primitives/runtime/src/traits/mod.rs index caede5e2b59..b217166d8de 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -19,7 +19,7 @@ use crate::{ generic::Digest, - scale_info::{MetaType, StaticTypeInfo, TypeInfo}, + scale_info::{StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, ValidTransaction, @@ -52,6 +52,12 @@ use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; +pub mod transaction_extension; +pub use transaction_extension::{ + DispatchTransaction, TransactionExtension, TransactionExtensionBase, + TransactionExtensionInterior, TransactionExtensionMetadata, ValidateResult, +}; + /// A lazy value. pub trait Lazy { /// Get a reference to the underlying value. @@ -1253,7 +1259,7 @@ pub trait Header: // that is then used to define `UncheckedExtrinsic`. // ```ignore // pub type UncheckedExtrinsic = -// generic::UncheckedExtrinsic; +// generic::UncheckedExtrinsic; // ``` // This `UncheckedExtrinsic` is supplied to the `Block`. // ```ignore @@ -1323,19 +1329,31 @@ pub trait Extrinsic: Sized { /// Is this `Extrinsic` signed? /// If no information are available about signed/unsigned, `None` should be returned. + #[deprecated = "Use and implement `!is_bare()` instead"] fn is_signed(&self) -> Option { None } - /// Create new instance of the extrinsic. - /// - /// Extrinsics can be split into: - /// 1. Inherents (no signature; created by validators during block production) - /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of - /// calls) 3. Signed Transactions (with signature; a regular transactions with known origin) + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + #[allow(deprecated)] + !self + .is_signed() + .expect("`is_signed` must return `Some` on production extrinsics; qed") + } + + /// Create a new old-school extrinsic, either a bare extrinsic if `_signed_data` is `None` or + /// a signed transaction is it is `Some`. + #[deprecated = "Use `new_inherent` or the `CreateTransaction` trait instead"] fn new(_call: Self::Call, _signed_data: Option) -> Option { None } + + /// Create a new inherent extrinsic. 
+ fn new_inherent(function: Self::Call) -> Self { + #[allow(deprecated)] + Self::new(function, None).expect("Extrinsic must provide inherents; qed") + } } /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an @@ -1371,7 +1389,8 @@ pub trait ExtrinsicMetadata { const VERSION: u8; /// Signed extensions attached to this `Extrinsic`. - type SignedExtensions: SignedExtension; + // TODO: metadata-v16: rename to `Extension`. + type Extra; } /// Extract the hashing type for a block. @@ -1456,6 +1475,8 @@ pub trait Dispatchable { -> crate::DispatchResultWithInfo; } +/// Shortcut to reference the `Origin` type of a `Dispatchable`. +pub type OriginOf = ::RuntimeOrigin; /// Shortcut to reference the `Info` type of a `Dispatchable`. pub type DispatchInfoOf = ::Info; /// Shortcut to reference the `PostInfo` type of a `Dispatchable`. @@ -1474,8 +1495,49 @@ impl Dispatchable for () { } } +/// Dispatchable impl containing an arbitrary value which panics if it actually is dispatched. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct FakeDispatchable(pub Inner); +impl From for FakeDispatchable { + fn from(inner: Inner) -> Self { + Self(inner) + } +} +impl FakeDispatchable { + /// Take `self` and return the underlying inner value. + pub fn deconstruct(self) -> Inner { + self.0 + } +} +impl AsRef for FakeDispatchable { + fn as_ref(&self) -> &Inner { + &self.0 + } +} + +impl Dispatchable for FakeDispatchable { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> crate::DispatchResultWithInfo { + panic!("This implementation should not be used for actual dispatch."); + } +} + +/// Runtime Origin which includes a System Origin variant whose `AccountId` is the parameter. +pub trait AsSystemOriginSigner { + /// Extract a reference of the inner value of the System `Origin::Signed` variant, if self has + /// that variant. + fn as_system_origin_signer(&self) -> Option<&AccountId>; +} + /// Means by which a transaction may be extended. This type embodies both the data and the logic /// that should be additionally associated with the transaction. It should be plain old data. +#[deprecated = "Use `TransactionExtension` instead."] pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo { @@ -1493,7 +1555,7 @@ pub trait SignedExtension: /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. - type AdditionalSigned: Encode + TypeInfo; + type AdditionalSigned: Codec + TypeInfo; /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre; @@ -1532,38 +1594,6 @@ pub trait SignedExtension: len: usize, ) -> Result; - /// Validate an unsigned transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid unsigned transaction, - /// and quickly eliminate ones that are stale or incorrect. - /// - /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. - fn validate_unsigned( - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a unsigned transaction. 
- /// - /// Note this function by default delegates to `validate_unsigned`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate_unsigned`. - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) - } - /// Do any post-flight stuff for an extrinsic. /// /// If the transaction is signed, then `_pre` will contain the output of `pre_dispatch`, @@ -1594,126 +1624,47 @@ pub trait SignedExtension: /// /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]s we need to return a `Vec` /// that holds the metadata of each one. Each individual `SignedExtension` must return - /// *exactly* one [`SignedExtensionMetadata`]. + /// *exactly* one [`TransactionExtensionMetadata`]. /// /// This method provides a default implementation that returns a vec containing a single - /// [`SignedExtensionMetadata`]. - fn metadata() -> Vec { - sp_std::vec![SignedExtensionMetadata { + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { identifier: Self::IDENTIFIER, ty: scale_info::meta_type::(), additional_signed: scale_info::meta_type::() }] } -} - -/// Information about a [`SignedExtension`] for the runtime metadata. -pub struct SignedExtensionMetadata { - /// The unique identifier of the [`SignedExtension`]. - pub identifier: &'static str, - /// The type of the [`SignedExtension`]. - pub ty: MetaType, - /// The type of the [`SignedExtension`] additional signed data for the payload. - pub additional_signed: MetaType, -} - -#[impl_for_tuples(1, 12)] -impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); - type AccountId = AccountId; - type Call = Call; - const IDENTIFIER: &'static str = "You should call `identifier()`!"; - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); - for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); - - fn additional_signed(&self) -> Result { - Ok(for_tuples!( ( #( Tuple.additional_signed()? ),* ) )) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) - } + /// Validate an unsigned transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid unsigned transaction, + /// and quickly eliminate ones that are stale or incorrect. 
fn validate_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); - Ok(valid) + Ok(ValidTransaction::default()) } + /// Do any pre-flight stuff for an unsigned transaction. + /// + /// Note this function by default delegates to `validate_unsigned`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need not perform the same validation as in + /// `validate_unsigned`. fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> Result<(), TransactionValidityError> { - for_tuples!( #( Tuple::pre_dispatch_unsigned(call, info, len)?; )* ); Ok(()) } - - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - match pre { - Some(x) => { - for_tuples!( #( Tuple::post_dispatch(Some(x.Tuple), info, post_info, len, result)?; )* ); - }, - None => { - for_tuples!( #( Tuple::post_dispatch(None, info, post_info, len, result)?; )* ); - }, - } - Ok(()) - } - - fn metadata() -> Vec { - let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::metadata()); )* ); - ids - } -} - -impl SignedExtension for () { - type AccountId = u64; - type AdditionalSigned = (); - type Call = (); - type Pre = (); - const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -1762,6 +1713,7 @@ pub trait GetNodeBlockType { /// function is called right before dispatching the call wrapped by an unsigned extrinsic. The /// [`validate_unsigned`](Self::validate_unsigned) function is mainly being used in the context of /// the transaction pool to check the validity of the call wrapped by an unsigned extrinsic. +// TODO: Rename to ValidateBareTransaction (or just remove). pub trait ValidateUnsigned { /// The call to validate type Call; diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs new file mode 100644 index 00000000000..a834e88c0b4 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [AsTransactionExtension] adapter struct for adapting [SignedExtension]s to +//! [TransactionExtension]s. + +#![allow(deprecated)] + +use scale_info::TypeInfo; +use sp_core::RuntimeDebug; + +use crate::{ + traits::{AsSystemOriginSigner, SignedExtension, ValidateResult}, + InvalidTransaction, +}; + +use super::*; + +/// Adapter to use a `SignedExtension` in the place of a `TransactionExtension`. +#[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[deprecated = "Convert your SignedExtension to a TransactionExtension."] +pub struct AsTransactionExtension(pub SE); + +impl Default for AsTransactionExtension { + fn default() -> Self { + Self(SE::default()) + } +} + +impl From for AsTransactionExtension { + fn from(value: SE) -> Self { + Self(value) + } +} + +impl TransactionExtensionBase for AsTransactionExtension { + const IDENTIFIER: &'static str = SE::IDENTIFIER; + type Implicit = SE::AdditionalSigned; + + fn implicit(&self) -> Result { + self.0.additional_signed() + } + fn metadata() -> Vec { + SE::metadata() + } +} + +impl TransactionExtension + for AsTransactionExtension +where + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + type Val = (); + type Pre = SE::Pre; + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let r = self.0.validate(who, call, info, len)?; + Ok((r, (), origin)) + } + + fn prepare( + self, + _: (), + origin: &::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _context: &Context, + ) -> Result { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + self.0.pre_dispatch(who, call, info, len) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(Some(pre), info, post_info, len, result) + } + + fn validate_bare_compat( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + SE::validate_unsigned(call, info, len) + } + + fn pre_dispatch_bare_compat( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + SE::pre_dispatch_unsigned(call, info, len) + } + + fn post_dispatch_bare_compat( + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(None, info, post_info, len, result) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs new file mode 100644 index 00000000000..8efa586aa0e --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs @@ -0,0 +1,141 @@ 
+// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [DispatchTransaction] trait. + +use super::*; + +/// Single-function utility trait with a blanket impl over [TransactionExtension] in order to +/// provide transaction dispatching functionality. We avoid implementing this directly on the trait +/// since we never want it to be overriden by the trait implementation. +pub trait DispatchTransaction { + /// The origin type of the transaction. + type Origin; + /// The info type. + type Info; + /// The resultant type. + type Result; + /// The `Val` of the extension. + type Val; + /// The `Pre` of the extension. + type Pre; + /// Just validate a transaction. + /// + /// The is basically the same as [validate](TransactionExtension::validate), except that there + /// is no need to supply the bond data. + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(ValidTransaction, Self::Val, Self::Origin), TransactionValidityError>; + /// Prepare and validate a transaction, ready for dispatch. + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(Self::Pre, Self::Origin), TransactionValidityError>; + /// Dispatch a transaction with the given base origin and call. + fn dispatch_transaction( + self, + origin: Self::Origin, + call: Call, + info: &Self::Info, + len: usize, + ) -> Self::Result; + /// Do everything which would be done in a [dispatch_transaction](Self::dispatch_transaction), + /// but instead of executing the call, execute `substitute` instead. Since this doesn't actually + /// dispatch the call, it doesn't need to consume it and so `call` can be passed as a reference. 
+ fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result; +} + +impl, Call: Dispatchable + Encode> DispatchTransaction + for T +{ + type Origin = ::RuntimeOrigin; + type Info = DispatchInfoOf; + type Result = crate::ApplyExtrinsicResultWithInfo>; + type Val = T::Val; + type Pre = T::Pre; + + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(ValidTransaction, T::Val, Self::Origin), TransactionValidityError> { + self.validate(origin, call, info, len, &mut (), self.implicit()?, call) + } + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(T::Pre, Self::Origin), TransactionValidityError> { + let (_, val, origin) = self.validate_only(origin, call, info, len)?; + let pre = self.prepare(val, &origin, &call, info, len, &())?; + Ok((pre, origin)) + } + fn dispatch_transaction( + self, + origin: ::RuntimeOrigin, + call: Call, + info: &DispatchInfoOf, + len: usize, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let res = call.dispatch(origin); + let post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + T::post_dispatch(pre, info, &post_info, len, &pd_res, &())?; + Ok(res) + } + fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let res = substitute(origin); + let post_info = match res { + Ok(info) => info, + Err(err) => err.post_info, + }; + let pd_res = res.map(|_| ()).map_err(|e| e.error); + T::post_dispatch(pre, info, &post_info, len, &pd_res, &())?; + Ok(res) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs new file mode 100644 index 00000000000..69a0ba18adb --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs @@ -0,0 +1,526 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The transaction extension trait. 
+ +use crate::{ + scale_info::{MetaType, StaticTypeInfo}, + transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + DispatchResult, +}; +use codec::{Codec, Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; +#[doc(hidden)] +pub use sp_std::marker::PhantomData; +use sp_std::{self, fmt::Debug, prelude::*}; +use sp_weights::Weight; +use tuplex::{PopFront, PushBack}; + +use super::{DispatchInfoOf, Dispatchable, OriginOf, PostDispatchInfoOf}; + +mod as_transaction_extension; +mod dispatch_transaction; +#[allow(deprecated)] +pub use as_transaction_extension::AsTransactionExtension; +pub use dispatch_transaction::DispatchTransaction; + +/// Shortcut for the result value of the `validate` function. +pub type ValidateResult = + Result<(ValidTransaction, Val, OriginOf), TransactionValidityError>; + +/// Simple blanket implementation trait to denote the bounds of a type which can be contained within +/// a [`TransactionExtension`]. +pub trait TransactionExtensionInterior: + Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo +{ +} +impl + TransactionExtensionInterior for T +{ +} + +/// Base for [TransactionExtension]s; this contains the associated types and does not require any +/// generic parameterization. +pub trait TransactionExtensionBase: TransactionExtensionInterior { + /// Unique identifier of this signed extension. + /// + /// This will be exposed in the metadata to identify the signed extension used in an extrinsic. + const IDENTIFIER: &'static str; + + /// Any additional data which was known at the time of transaction construction and can be + /// useful in authenticating the transaction. This is determined dynamically in part from the + /// on-chain environment using the `implicit` function and not directly contained in the + /// transaction itself and therefore is considered "implicit". + type Implicit: Codec + StaticTypeInfo; + + /// Determine any additional data which was known at the time of transaction construction and + /// can be useful in authenticating the transaction. The expected usage of this is to include in + /// any data which is signed and verified as part of transactiob validation. Also perform any + /// pre-signature-verification checks and return an error if needed. + fn implicit(&self) -> Result { + use crate::InvalidTransaction::IndeterminateImplicit; + Ok(Self::Implicit::decode(&mut &[][..]).map_err(|_| IndeterminateImplicit)?) + } + + /// The weight consumed by executing this extension instance fully during transaction dispatch. + fn weight(&self) -> Weight { + Weight::zero() + } + + /// Returns the metadata for this extension. + /// + /// As a [`TransactionExtension`] can be a tuple of [`TransactionExtension`]s we need to return + /// a `Vec` that holds the metadata of each one. Each individual `TransactionExtension` must + /// return *exactly* one [`TransactionExtensionMetadata`]. + /// + /// This method provides a default implementation that returns a vec containing a single + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { + identifier: Self::IDENTIFIER, + ty: scale_info::meta_type::(), + // TODO: Metadata-v16: Rename to "implicit" + additional_signed: scale_info::meta_type::() + }] + } +} + +/// Means by which a transaction may be extended. This type embodies both the data and the logic +/// that should be additionally associated with the transaction. It should be plain old data. 
+/// +/// The simplest transaction extension would be the Unit type (and empty pipeline) `()`. This +/// executes no additional logic and implies a dispatch of the transaction's call using the +/// inherited origin (either `None` or `Signed`, depending on whether this is a signed or general +/// transaction). +/// +/// Transaction extensions are capable of altering certain associated semantics: +/// +/// - They may define the origin with which the transaction's call should be dispatched. +/// - They may define various parameters used by the transction queue to determine under what +/// conditions the transaction should be retained and introduced on-chain. +/// - They may define whether this transaction is acceptable for introduction on-chain at all. +/// +/// Each of these semantics are defined by the `validate` function. +/// +/// **NOTE: Transaction extensions cannot under any circumctances alter the call itself.** +/// +/// Transaction extensions are capable of defining logic which is executed additionally to the +/// dispatch of the call: +/// +/// - They may define logic which must be executed prior to the dispatch of the call. +/// - They may also define logic which must be executed after the dispatch of the call. +/// +/// Each of these semantics are defined by the `prepare` and `post_dispatch` functions respectively. +/// +/// Finally, transaction extensions may define additional data to help define the implications of +/// the logic they introduce. This additional data may be explicitly defined by the transaction +/// author (in which case it is included as part of the transaction body), or it may be implicitly +/// defined by the transaction extension based around the on-chain state (which the transaction +/// author is assumed to know). This data may be utilized by the above logic to alter how a node's +/// transaction queue treats this transaction. +/// +/// ## Default implementations +/// +/// Of the 5 functions in this trait, 3 of them must return a value of an associated type on +/// success, and none of these types implement [Default] or anything like it. This means that +/// default implementations cannot be provided for these functions. However, a macro is provided +/// [impl_tx_ext_default](crate::impl_tx_ext_default) which is capable of generating default +/// implementations for each of these 3 functions. If you do not wish to introduce additional logic +/// into the transaction pipeline, then it is recommended that you use this macro to implement these +/// functions. +/// +/// ## Pipelines, Inherited Implications, and Authorized Origins +/// +/// Requiring a single transaction extension to define all of the above semantics would be +/// cumbersome and would lead to a lot of boilerplate. Instead, transaction extensions are +/// aggregated into pipelines, which are tuples of transaction extensions. Each extension in the +/// pipeline is executed in order, and the output of each extension is aggregated and/or relayed as +/// the input to the next extension in the pipeline. +/// +/// This ordered composition happens with all datatypes ([Val](TransactionExtension::Val), +/// [Pre](TransactionExtension::Pre) and [Implicit](TransactionExtensionBase::Implicit)) as well as +/// all functions. There are important consequences stemming from how the composition affects the +/// meaning of the `origin` and `implication` parameters as well as the results. 
Whereas the +/// [prepare](TransactionExtension::prepare) and +/// [post_dispatch](TransactionExtension::post_dispatch) functions are clear in their meaning, the +/// [validate](TransactionExtension::validate) function is fairly sophisticated and warrants +/// further explanation. +/// +/// Firstly, the `origin` parameter. The `origin` passed into the first item in a pipeline is simply +/// that passed into the tuple itself. It represents an authority who has authorized the implication +/// of the transaction, as of the extension it has been passed into *and any further extensions it +/// may pass though, all the way to, and including, the transaction's dispatch call itself. Each +/// following item in the pipeline is passed the origin which the previous item returned. The origin +/// returned from the final item in the pipeline is the origin which is returned by the tuple +/// itself. +/// +/// This means that if a constituent extension returns a different origin to the one it was called +/// with, then (assuming no other extension changes it further) *this new origin will be used for +/// all extensions following it in the pipeline, and will be returned from the pipeline to be used +/// as the origin for the call's dispatch*. The call itself as well as all these extensions +/// following may each imply consequence for this origin. We call this the *inherited implication*. +/// +/// The *inherited implication* is the cumulated on-chain effects born by whatever origin is +/// returned. It is expressed to the [validate](TransactionExtension::validate) function only as the +/// `implication` argument which implements the [Encode] trait. A transaction extension may define +/// its own implications through its own fields and the +/// [implicit](TransactionExtensionBase::implicit) function. This is only utilized by extensions +/// which preceed it in a pipeline or, if the transaction is an old-school signed trasnaction, the +/// underlying transaction verification logic. +/// +/// **The inherited implication passed as the `implication` parameter to +/// [validate](TransactionExtension::validate) does not include the extension's inner data itself +/// nor does it include the result of the extension's `implicit` function.** If you both provide an +/// implication and rely on the implication, then you need to manually aggregate your extensions +/// implication with the aggregated implication passed in. +pub trait TransactionExtension: TransactionExtensionBase { + /// The type that encodes information that can be passed from validate to prepare. + type Val; + + /// The type that encodes information that can be passed from prepare to post-dispatch. + type Pre; + + /// Validate a transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue to obtain transaction + /// validity against current state. It should perform all checks that determine a valid + /// transaction, that can pay for its execution and quickly eliminate ones that are stale or + /// incorrect. + /// + /// Parameters: + /// - `origin`: The origin of the transaction which this extension inherited; coming from an + /// "old-school" *signed transaction*, this will be a system `RawOrigin::Signed` value. If the + /// transaction is a "new-school" *General Transaction*, then this will be a system + /// `RawOrigin::None` value. 
If this extension is an item in a composite, then it could be + /// anything which was previously returned as an `origin` value in the result of a `validate` + /// call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `inherited_implication`: The *implication* which this extension inherits. This is a tuple + /// of the transaction's call and some additional opaque-but-encodable data. Coming directly + /// from a transaction, the latter is [()]. However, if this extension is expressed as part of + /// a composite type, then the latter component is equal to any further implications to which + /// the returned `origin` could potentially apply. See Pipelines, Inherited Implications, and + /// Authorized Origins for more information. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// Returns a [ValidateResult], which is a [Result] whose success type is a tuple of + /// [ValidTransaction] (defining useful metadata for the transaction queue), the [Self::Val] + /// token of this transaction, which gets passed into [prepare](TransactionExtension::prepare), + /// and the origin of the transaction, which gets passed into + /// [prepare](TransactionExtension::prepare) and is ultimately used for dispatch. + fn validate( + &self, + origin: OriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult; + + /// Do any pre-flight stuff for a transaction after validation. + /// + /// This is for actions which do not happen in the transaction queue but only immediately prior + /// to the point of dispatch on-chain. This should not return an error, since errors should + /// already have been identified during the [validate](TransactionExtension::validate) call. If + /// an error is returned, the transaction will be considered invalid but no state changes will + /// happen and therefore work done in [validate](TransactionExtension::validate) will not be + /// paid for. + /// + /// Unlike `validate`, this function may consume `self`. + /// + /// Parameters: + /// - `val`: `Self::Val` returned by the result of the `validate` call. + /// - `origin`: The origin returned by the result of the `validate` call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// Returns a [Self::Pre] value on success, which gets passed into + /// [post_dispatch](TransactionExtension::post_dispatch) and after the call is dispatched. + /// + /// IMPORTANT: **Checks made in validation need not be repeated here.** + fn prepare( + self, + val: Self::Val, + origin: &OriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &Context, + ) -> Result; + + /// Do any post-flight stuff for an extrinsic. + /// + /// `_pre` contains the output of `prepare`. + /// + /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, + /// introduce a `TransactionValidityError`, causing the block to become invalid for including + /// it. + /// + /// Parameters: + /// - `pre`: `Self::Pre` returned by the result of the `prepare` call prior to dispatch. 
+ /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `post_info`: Information concerning the dispatch of the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `result`: The result of the dispatch. + /// - `context`: Some opaque mutable context, as yet unused. + /// + /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the + /// transaction and any block that it is included in, causing the block author to not be + /// compensated for their work in validating the transaction or producing the block so far. It + /// can only be used safely when you *know* that the transaction is one that would only be + /// introduced by the current block author. + fn post_dispatch( + _pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Compatibility function for supporting the `SignedExtension::validate_unsigned` function. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn validate_bare_compat( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(ValidTransaction::default()) + } + + /// Compatibility function for supporting the `SignedExtension::pre_dispatch_unsigned` function. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn pre_dispatch_bare_compat( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Compatibility function for supporting the `SignedExtension::post_dispatch` function where + /// `pre` is `None`. + /// + /// DO NOT USE! THIS MAY BE REMOVED AT ANY TIME! + #[deprecated = "Only for compatibility. DO NOT USE."] + fn post_dispatch_bare_compat( + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} + +/// Implict +#[macro_export] +macro_rules! impl_tx_ext_default { + ($call:ty ; $context:ty ; , $( $rest:tt )*) => { + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ; validate $( $rest:tt )*) => { + fn validate( + &self, + origin: $crate::traits::OriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + _context: &mut $context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl $crate::codec::Encode, + ) -> $crate::traits::ValidateResult { + Ok((Default::default(), Default::default(), origin)) + } + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ; prepare $( $rest:tt )*) => { + fn prepare( + self, + _val: Self::Val, + _origin: &$crate::traits::OriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + _context: & $context, + ) -> Result { + Ok(Default::default()) + } + impl_tx_ext_default!{$call ; $context ; $( $rest )*} + }; + ($call:ty ; $context:ty ;) => {}; +} + +/// Information about a [`TransactionExtension`] for the runtime metadata. +pub struct TransactionExtensionMetadata { + /// The unique identifier of the [`TransactionExtension`]. + pub identifier: &'static str, + /// The type of the [`TransactionExtension`]. 
+ pub ty: MetaType, + /// The type of the [`TransactionExtension`] additional signed data for the payload. + // TODO: Rename "implicit" + pub additional_signed: MetaType, +} + +#[impl_for_tuples(1, 12)] +impl TransactionExtensionBase for Tuple { + for_tuples!( where #( Tuple: TransactionExtensionBase )* ); + const IDENTIFIER: &'static str = "Use `metadata()`!"; + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); + fn implicit(&self) -> Result { + Ok(for_tuples!( ( #( Tuple.implicit()? ),* ) )) + } + fn weight(&self) -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight += Tuple.weight(); )* ); + weight + } + fn metadata() -> Vec { + let mut ids = Vec::new(); + for_tuples!( #( ids.extend(Tuple::metadata()); )* ); + ids + } +} + +#[impl_for_tuples(1, 12)] +impl TransactionExtension for Tuple { + for_tuples!( where #( Tuple: TransactionExtension )* ); + for_tuples!( type Val = ( #( Tuple::Val ),* ); ); + for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &mut Context, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let valid = ValidTransaction::default(); + let val = (); + let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) ); + let following_implicit_implications = self_implicit; + + for_tuples!(#( + // Implication of this pipeline element not relevant for later items, so we pop it. + let (_item, following_explicit_implications) = following_explicit_implications.pop_front(); + let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front(); + let (item_valid, item_val, origin) = { + let implications = ( + // The first is the implications born of the fact we return the mutated + // origin. + inherited_implication, + // This is the explicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_explicit_implications, + // This is the implicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_implicit_implications, + ); + Tuple.validate(origin, call, info, len, context, item_implicit, &implications)? + }; + let valid = valid.combine_with(item_valid); + let val = val.push_back(item_val); + )* ); + Ok((valid, val, origin)) + } + + fn prepare( + self, + val: Self::Val, + origin: &::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + context: &Context, + ) -> Result { + Ok(for_tuples!( ( #( + Tuple::prepare(self.Tuple, val.Tuple, origin, call, info, len, context)? 
+ ),* ) )) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + context: &Context, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result, context)?; )* ); + Ok(()) + } +} + +impl TransactionExtensionBase for () { + const IDENTIFIER: &'static str = "UnitTransactionExtension"; + type Implicit = (); + fn implicit(&self) -> sp_std::result::Result { + Ok(()) + } + fn weight(&self) -> Weight { + Weight::zero() + } +} + +impl TransactionExtension for () { + type Val = (); + type Pre = (); + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, (), ::RuntimeOrigin), + TransactionValidityError, + > { + Ok((ValidTransaction::default(), (), origin)) + } + fn prepare( + self, + _val: (), + _origin: &::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _context: &Context, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs index 83694849382..24124128083 100644 --- a/substrate/primitives/runtime/src/transaction_validity.rs +++ b/substrate/primitives/runtime/src/transaction_validity.rs @@ -82,6 +82,8 @@ pub enum InvalidTransaction { MandatoryValidation, /// The sending address is disabled or known to be invalid. BadSigner, + /// The implicit data was unable to be calculated. + IndeterminateImplicit, } impl InvalidTransaction { @@ -113,6 +115,8 @@ impl From for &'static str { "Transaction dispatch is mandatory; transactions must not be validated.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", + InvalidTransaction::IndeterminateImplicit => + "The implicit data was unable to be calculated", } } } @@ -338,7 +342,7 @@ pub struct ValidTransactionBuilder { impl ValidTransactionBuilder { /// Set the priority of a transaction. /// - /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Note that the final priority for `FRAME` is combined from all `TransactionExtension`s. /// Most likely for unsigned transactions you want the priority to be higher /// than for regular transactions. We recommend exposing a base priority for unsigned /// transactions as a runtime module parameter, so that the runtime can tune inter-module diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 6dd7cede319..fe5f89a5b56 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -107,6 +107,19 @@ for PALLET in "${PALLETS[@]}"; do FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; WEIGHT_FILE="./frame/${FOLDER}/src/weights.rs" + + # Special handling of custom weight paths. 
+ if [ "$PALLET" == "frame_system_extensions" ] || [ "$PALLET" == "frame-system-extensions" ] + then + WEIGHT_FILE="./frame/system/src/extensions/weights.rs" + elif [ "$PALLET" == "pallet_asset_conversion_tx_payment" ] || [ "$PALLET" == "pallet-asset-conversion-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs" + elif [ "$PALLET" == "pallet_asset_tx_payment" ] || [ "$PALLET" == "pallet-asset-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-tx-payment/src/weights.rs" + fi + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; OUTPUT=$( diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 05ffb7db5d5..7eb1839e97b 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -25,7 +25,7 @@ use codec::Encode; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; use sp_keyring::AccountKeyring; -use sp_runtime::{transaction_validity::TransactionPriority, Perbill}; +use sp_runtime::{generic::Preamble, transaction_validity::TransactionPriority, Perbill}; use sp_std::prelude::*; /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data. @@ -66,11 +66,11 @@ impl TryFrom<&Extrinsic> for TransferData { match uxt { Extrinsic { function: RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }), - signature: Some((from, _, (CheckNonce(nonce), ..))), + preamble: Preamble::Signed(from, _, ((CheckNonce(nonce), ..), ..)), } => Ok(TransferData { from: *from, to: *dest, amount: *value, nonce: *nonce }), Extrinsic { function: RuntimeCall::SubstrateTest(PalletCall::bench_call { transfer }), - signature: None, + preamble: Preamble::Bare, } => Ok(transfer.clone()), _ => Err(()), } @@ -190,18 +190,17 @@ impl ExtrinsicBuilder { /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { - let extra = ( - CheckNonce::from(self.nonce.unwrap_or(0)), - CheckWeight::new(), + let tx_ext = ( + (CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new()), CheckSubstrateCall {}, ); let raw_payload = - SignedPayload::from_raw(self.function.clone(), extra.clone(), ((), (), ())); + SignedPayload::from_raw(self.function.clone(), tx_ext.clone(), (((), ()), ())); let signature = raw_payload.using_encoded(|e| signer.sign(e)); - Extrinsic::new_signed(self.function, signer.public(), signature, extra) + Extrinsic::new_signed(self.function, signer.public(), signature, tx_ext) } else { - Extrinsic::new_unsigned(self.function) + Extrinsic::new_bare(self.function) } } } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 63e0aa6e137..7ec5f6722c4 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -58,9 +58,11 @@ use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_core::hash::H256; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - create_runtime_str, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + create_runtime_str, impl_opaque_keys, impl_tx_ext_default, + traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, NumberFor, Verify}, + transaction_validity::{ + TransactionSource, TransactionValidity, 
TransactionValidityError, ValidTransaction, + }, ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] @@ -142,13 +144,14 @@ pub type Signature = sr25519::Signature; #[cfg(feature = "std")] pub type Pair = sp_core::sr25519::Pair; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = (CheckNonce, CheckWeight, CheckSubstrateCall); +// TODO: Remove after the Checks are migrated to TxExtension. +/// The extension to the basic transaction logic. +pub type TxExtension = ((CheckNonce, CheckWeight), CheckSubstrateCall); /// The payload being signed in transactions. -pub type SignedPayload = sp_runtime::generic::SignedPayload; +pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type Extrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; /// An identifier for an account on this system. pub type AccountId = ::Signer; @@ -243,7 +246,7 @@ impl sp_runtime::traits::Printable for CheckSubstrateCall { } impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { - type RuntimeOrigin = CheckSubstrateCall; + type RuntimeOrigin = RuntimeOrigin; type Config = CheckSubstrateCall; type Info = CheckSubstrateCall; type PostInfo = CheckSubstrateCall; @@ -256,42 +259,37 @@ impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { } } -impl sp_runtime::traits::SignedExtension for CheckSubstrateCall { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl sp_runtime::traits::TransactionExtensionBase for CheckSubstrateCall { const IDENTIFIER: &'static str = "CheckSubstrateCall"; - - fn additional_signed( - &self, - ) -> sp_std::result::Result { - Ok(()) - } + type Implicit = (); +} +impl sp_runtime::traits::TransactionExtension + for CheckSubstrateCall +{ + type Pre = (); + type Val = (); + impl_tx_ext_default!(RuntimeCall; Context; prepare); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _context: &mut Context, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { log::trace!(target: LOG_TARGET, "validate"); - match call { + let v = match call { RuntimeCall::SubstrateTest(ref substrate_test_call) => - substrate_test_pallet::validate_runtime_call(substrate_test_call), - _ => Ok(Default::default()), - } - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(drop) + substrate_test_pallet::validate_runtime_call(substrate_test_call)?, + _ => Default::default(), + }; + Ok((v, (), origin)) } } @@ -364,6 +362,7 @@ impl frame_system::pallet::Config for Runtime { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = ConstU32<16>; @@ -672,7 +671,7 @@ impl_runtime_apis! 
{ impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - let ext = Extrinsic::new_unsigned( + let ext = Extrinsic::new_bare( substrate_test_pallet::pallet::Call::storage_change{ key:b"some_key".encode(), value:Some(header.number.encode()) @@ -1025,7 +1024,7 @@ mod tests { use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_keyring::AccountKeyring; use sp_runtime::{ - traits::{Hash as _, SignedExtension}, + traits::{DispatchTransaction, Hash as _}, transaction_validity::{InvalidTransaction, ValidTransaction}, }; use substrate_test_runtime_client::{ @@ -1174,31 +1173,33 @@ mod tests { fn check_substrate_check_signed_extension_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - let x = sp_keyring::AccountKeyring::Alice.into(); + let x: AccountId = sp_keyring::AccountKeyring::Alice.into(); let info = DispatchInfo::default(); let len = 0_usize; assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_with_priority(16).build().function, &info, - len + len, ) .unwrap() + .0 .priority, 16 ); assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_do_not_propagate().build().function, &info, - len + len, ) .unwrap() + .0 .propagate, false ); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs index 1e5e294acba..a044049a0d6 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -22,7 +22,11 @@ use core::marker::PhantomData; /// Weight functions for `{{pallet}}`. pub struct WeightInfo(PhantomData); +{{#if (eq pallet "frame_system_extensions")}} +impl frame_system::ExtensionsWeightInfo for WeightInfo { +{{else}} impl {{pallet}}::WeightInfo for WeightInfo { +{{/if}} {{#each benchmarks as |benchmark|}} {{#each benchmark.comments as |comment|}} /// {{comment}} diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index c7399468da9..d49479d9cda 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1204,8 +1204,9 @@ where #[cfg(test)] mod test_prelude { pub(crate) use super::*; - pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; - pub(crate) type Block = RawBlock>; + pub(crate) use sp_runtime::testing::{Block as RawBlock, MockCallU64}; + pub(crate) type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic; + pub(crate) type Block = RawBlock; pub(crate) fn init_logger() { sp_tracing::try_init_simple(); diff --git a/substrate/utils/frame/rpc/client/src/lib.rs b/substrate/utils/frame/rpc/client/src/lib.rs index 221f260b156..4f4f4bbef56 100644 --- a/substrate/utils/frame/rpc/client/src/lib.rs +++ b/substrate/utils/frame/rpc/client/src/lib.rs @@ -199,11 +199,15 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; + use sp_runtime::{ + generic::UncheckedExtrinsic, + testing::{Block as TBlock, Header, MockCallU64, H256}, + }; use std::sync::Arc; use tokio::sync::Mutex; - type Block = TBlock>; + type UncheckedXt = UncheckedExtrinsic; + type Block = TBlock; type BlockNumber = u64; type Hash = H256; -- GitLab From 0708cf382daec9996c890ab494122196a2c23d4d Mon Sep 17 00:00:00 
2001 From: PG Herveou Date: Mon, 4 Mar 2024 21:32:03 +0100 Subject: [PATCH 36/86] Contracts: Fix typo (#3563) --- substrate/frame/contracts/uapi/src/host.rs | 2 +- substrate/frame/contracts/uapi/src/host/riscv32.rs | 2 +- substrate/frame/contracts/uapi/src/host/wasm32.rs | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index c25be4479ce..04f58895ab4 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -180,7 +180,7 @@ pub trait HostFn { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs index dbd5abc4240..561ab28747d 100644 --- a/substrate/frame/contracts/uapi/src/host/riscv32.rs +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -130,7 +130,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index 9651aa73d6f..17c0fd341b5 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -223,7 +223,7 @@ mod sys { pub fn weight_to_fee( ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, output_ptr: *mut u8, output_len_ptr: *mut u32, ); @@ -239,7 +239,7 @@ mod sys { flags: u32, callee_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, transferred_value_ptr: *const u8, input_data_ptr: *const u8, @@ -251,7 +251,7 @@ mod sys { pub fn instantiate( code_hash_ptr: *const u8, ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit_ptr: *const u8, value_ptr: *const u8, input_ptr: *const u8, @@ -487,7 +487,7 @@ impl HostFn for HostFnImpl { flags: CallFlags, callee: &[u8], ref_time_limit: u64, - proof_time_limit: u64, + proof_size_limit: u64, deposit: Option<&[u8]>, value: &[u8], input_data: &[u8], @@ -501,7 +501,7 @@ impl HostFn for HostFnImpl { flags.bits(), callee.as_ptr(), ref_time_limit, - proof_time_limit, + proof_size_limit, deposit_ptr, value.as_ptr(), input_data.as_ptr(), -- GitLab From ec30d2f5da839a7fb3f34778013c63e5afc9e90f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 22:41:26 +0000 Subject: [PATCH 37/86] Bump mio from 0.8.8 to 0.8.11 (#3569) Bumps [mio](https://github.com/tokio-rs/mio) from 0.8.8 to 0.8.11.
Changelog

Sourced from mio's changelog, covering releases 0.8.11, 0.8.10 (Added) and 0.8.9 (Added, Fixed).

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=mio&package-manager=cargo&previous-version=0.8.8&new-version=0.8.11)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/paritytech/polkadot-sdk/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 81881984378..9ee48f33074 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8234,9 +8234,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", -- GitLab From c34ad77d0524add1ef7cec3de72df7874c67f8a9 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 5 Mar 2024 09:47:10 +0200 Subject: [PATCH 38/86] Remove useless Result import (#3534) Latest Rust nightly complains with `the item `Result` is imported redundantly` Co-authored-by: Sebastian Kunert Co-authored-by: Liam Aharon --- substrate/primitives/consensus/babe/src/inherents.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/substrate/primitives/consensus/babe/src/inherents.rs b/substrate/primitives/consensus/babe/src/inherents.rs index 909769f3031..54b7b644016 100644 --- a/substrate/primitives/consensus/babe/src/inherents.rs +++ b/substrate/primitives/consensus/babe/src/inherents.rs @@ -17,7 +17,6 @@ //! Inherents for BABE -use core::result::Result; use sp_inherents::{Error, InherentData, InherentIdentifier}; /// The BABE inherent identifier. -- GitLab From 0eda0b3f175338a22e263489320fe2280875711b Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 5 Mar 2024 10:47:42 +0100 Subject: [PATCH 39/86] Contracts call extract_from_slice in macro (#3566) --- substrate/frame/contracts/uapi/src/host/wasm32.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index 17c0fd341b5..bc697238061 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -301,6 +301,7 @@ macro_rules! impl_wrapper_for { unsafe { $( $mod )::*::$name(output.as_mut_ptr(), &mut output_len); } + extract_from_slice(output, output_len as usize) } } }; -- GitLab From efcea0edab085f036b1e0033781f5f39a73b7904 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 5 Mar 2024 10:50:57 +0100 Subject: [PATCH 40/86] rpc server: add prometheus label `is_rate_limited` (#3504) After some discussion with @kogeler after the we added the rate-limit middleware it may slow down the rpc call timings metrics significantly because it works as follows: 1. The rate limit guard is checked when the call comes and if a slot is available -> process the call 2. If no free spot is available then the call will be sleeping `jitter_delay + min_time_rate_guard` then woken up and checked at most ten times 3. If no spot is available after 10 iterations -> the call is rejected (this may take tens of seconds) Thus, this PR adds a label "is_rate_limited" to filter those out on the metrics "substrate_rpc_calls_time" and "substrate_rpc_calls_finished". I had to merge two middleware layers Metrics and RateLimit to avoid shared state in a hacky way. 
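For reference, a minimal sketch of the retry flow described in points 1-3 above, using the same `governor` and `tokio` primitives as the middleware in the diff below; the helper names (`DirectRateLimiter`, `per_minute`, `acquire_slot`) are illustrative only and not part of the actual change:

```rust
use std::{num::NonZeroU32, time::Duration};

use governor::{
    clock::{Clock, DefaultClock, QuantaClock},
    middleware::NoOpMiddleware,
    state::{InMemoryState, NotKeyed},
    Jitter, Quota, RateLimiter,
};

type DirectRateLimiter = RateLimiter<NotKeyed, InMemoryState, DefaultClock, NoOpMiddleware>;

const MAX_RETRIES: usize = 10;
const MAX_JITTER: Duration = Duration::from_millis(50);

/// Build a per-minute limiter together with the clock that drives it.
fn per_minute(n: NonZeroU32) -> (DirectRateLimiter, QuantaClock) {
    let clock = QuantaClock::default();
    (RateLimiter::direct_with_clock(Quota::per_minute(n), &clock), clock)
}

/// Returns `Ok(is_rate_limited)` once a slot is acquired, or `Err(())` when the call
/// should be rejected after `MAX_RETRIES` attempts.
async fn acquire_slot(limit: &DirectRateLimiter, clock: &QuantaClock) -> Result<bool, ()> {
    let jitter = Jitter::up_to(MAX_JITTER);
    let mut is_rate_limited = false;

    for _ in 0..MAX_RETRIES {
        match limit.check() {
            // 1. A slot is available -> process the call, reporting whether it had to wait.
            Ok(()) => return Ok(is_rate_limited),
            // 2. No free slot -> sleep for the jittered wait time and check again.
            Err(rejected) => {
                is_rate_limited = true;
                tokio::time::sleep(jitter + rejected.wait_time_from(clock.now())).await;
            },
        }
    }

    // 3. Still no slot after `MAX_RETRIES` attempts -> reject the call.
    Err(())
}
```

With this shape the `is_rate_limited` flag is available next to the response, which is why the metrics and rate-limit layers are merged into a single middleware in the change below.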
--------- Co-authored-by: James Wilson --- Cargo.lock | 1 - prdoc/pr_3504.prdoc | 13 ++ substrate/client/rpc-servers/Cargo.toml | 1 - substrate/client/rpc-servers/src/lib.rs | 25 ++- .../rpc-servers/src/middleware/metrics.rs | 196 +++++++----------- .../client/rpc-servers/src/middleware/mod.rs | 129 +++++++++++- .../rpc-servers/src/middleware/rate_limit.rs | 86 ++------ 7 files changed, 239 insertions(+), 212 deletions(-) create mode 100644 prdoc/pr_3504.prdoc diff --git a/Cargo.lock b/Cargo.lock index 9ee48f33074..d262c1795a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16745,7 +16745,6 @@ dependencies = [ "hyper", "jsonrpsee", "log", - "pin-project", "serde_json", "substrate-prometheus-endpoint", "tokio", diff --git a/prdoc/pr_3504.prdoc b/prdoc/pr_3504.prdoc new file mode 100644 index 00000000000..90a8ddd38b8 --- /dev/null +++ b/prdoc/pr_3504.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: add prometheus label "is_rate_limited" to rpc calls + +doc: + - audience: Node Operator + description: | + This PR adds a label "is_rate_limited" to the prometheus metrics "substrate_rpc_calls_time" and "substrate_rpc_calls_finished" + than can be used to distinguish rate-limited RPC calls from other RPC calls. Because rate-limited RPC calls may take + tens of seconds. + +crates: [ ] diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml index 7f7a86799e0..3adc81c57d5 100644 --- a/substrate/client/rpc-servers/Cargo.toml +++ b/substrate/client/rpc-servers/Cargo.toml @@ -26,5 +26,4 @@ tower = { version = "0.4.13", features = ["util"] } http = "0.2.8" hyper = "0.14.27" futures = "0.3.29" -pin-project = "1.1.3" governor = "0.6.0" diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index e65d954bed5..ad4b444c7ff 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -49,7 +49,7 @@ pub use jsonrpsee::{ }, server::{middleware::rpc::RpcServiceBuilder, BatchRequestConfig}, }; -pub use middleware::{MetricsLayer, RateLimitLayer, RpcMetrics}; +pub use middleware::{Metrics, MiddlewareLayer, RpcMetrics}; const MEGABYTE: u32 = 1024 * 1024; @@ -173,13 +173,22 @@ where let is_websocket = ws::is_upgrade_request(&req); let transport_label = if is_websocket { "ws" } else { "http" }; - let metrics = metrics.map(|m| MetricsLayer::new(m, transport_label)); - let rate_limit = rate_limit.map(|r| RateLimitLayer::per_minute(r)); + let middleware_layer = match (metrics, rate_limit) { + (None, None) => None, + (Some(metrics), None) => Some( + MiddlewareLayer::new().with_metrics(Metrics::new(metrics, transport_label)), + ), + (None, Some(rate_limit)) => + Some(MiddlewareLayer::new().with_rate_limit_per_minute(rate_limit)), + (Some(metrics), Some(rate_limit)) => Some( + MiddlewareLayer::new() + .with_metrics(Metrics::new(metrics, transport_label)) + .with_rate_limit_per_minute(rate_limit), + ), + }; - // NOTE: The metrics needs to run first to include rate-limited calls in the - // metrics. let rpc_middleware = - RpcServiceBuilder::new().option_layer(metrics.clone()).option_layer(rate_limit); + RpcServiceBuilder::new().option_layer(middleware_layer.clone()); let mut svc = service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle); @@ -191,9 +200,9 @@ where // Spawn a task to handle when the connection is closed. 
tokio_handle.spawn(async move { let now = std::time::Instant::now(); - metrics.as_ref().map(|m| m.ws_connect()); + middleware_layer.as_ref().map(|m| m.ws_connect()); on_disconnect.await; - metrics.as_ref().map(|m| m.ws_disconnect(now)); + middleware_layer.as_ref().map(|m| m.ws_disconnect(now)); }); } diff --git a/substrate/client/rpc-servers/src/middleware/metrics.rs b/substrate/client/rpc-servers/src/middleware/metrics.rs index c2d1956c3b3..17849dc0c44 100644 --- a/substrate/client/rpc-servers/src/middleware/metrics.rs +++ b/substrate/client/rpc-servers/src/middleware/metrics.rs @@ -18,15 +18,9 @@ //! RPC middleware to collect prometheus metrics on RPC calls. -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Instant, -}; +use std::time::Instant; -use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse}; -use pin_project::pin_project; +use jsonrpsee::{types::Request, MethodResponse}; use prometheus_endpoint::{ register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, @@ -77,7 +71,7 @@ impl RpcMetrics { "Total time [ΞΌs] of processed RPC calls", ) .buckets(HISTOGRAM_BUCKETS.to_vec()), - &["protocol", "method"], + &["protocol", "method", "is_rate_limited"], )?, metrics_registry, )?, @@ -97,7 +91,7 @@ impl RpcMetrics { "substrate_rpc_calls_finished", "Number of processed RPC calls (unique un-batched requests)", ), - &["protocol", "method", "is_error"], + &["protocol", "method", "is_error", "is_rate_limited"], )?, metrics_registry, )?, @@ -144,138 +138,90 @@ impl RpcMetrics { self.ws_sessions_closed.as_ref().map(|counter| counter.inc()); self.ws_sessions_time.with_label_values(&["ws"]).observe(micros as _); } -} - -/// Metrics layer. -#[derive(Clone)] -pub struct MetricsLayer { - inner: RpcMetrics, - transport_label: &'static str, -} - -impl MetricsLayer { - /// Create a new [`MetricsLayer`]. - pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { - Self { inner: metrics, transport_label } - } - - pub(crate) fn ws_connect(&self) { - self.inner.ws_connect(); - } - - pub(crate) fn ws_disconnect(&self, now: Instant) { - self.inner.ws_disconnect(now) - } -} - -impl tower::Layer for MetricsLayer { - type Service = Metrics; - - fn layer(&self, inner: S) -> Self::Service { - Metrics::new(inner, self.inner.clone(), self.transport_label) - } -} - -/// Metrics middleware. -#[derive(Clone)] -pub struct Metrics { - service: S, - metrics: RpcMetrics, - transport_label: &'static str, -} - -impl Metrics { - /// Create a new metrics middleware. 
- pub fn new(service: S, metrics: RpcMetrics, transport_label: &'static str) -> Metrics { - Metrics { service, metrics, transport_label } - } -} - -impl<'a, S> RpcServiceT<'a> for Metrics -where - S: Send + Sync + RpcServiceT<'a>, -{ - type Future = ResponseFuture<'a, S::Future>; - - fn call(&self, req: Request<'a>) -> Self::Future { - let now = Instant::now(); + pub(crate) fn on_call(&self, req: &Request, transport_label: &'static str) { log::trace!( target: "rpc_metrics", - "[{}] on_call name={} params={:?}", - self.transport_label, + "[{transport_label}] on_call name={} params={:?}", req.method_name(), req.params(), ); - self.metrics - .calls_started - .with_label_values(&[self.transport_label, req.method_name()]) + + self.calls_started + .with_label_values(&[transport_label, req.method_name()]) .inc(); + } - ResponseFuture { - fut: self.service.call(req.clone()), - metrics: self.metrics.clone(), - req, - now, - transport_label: self.transport_label, - } + pub(crate) fn on_response( + &self, + req: &Request, + rp: &MethodResponse, + is_rate_limited: bool, + transport_label: &'static str, + now: Instant, + ) { + log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now); + log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={}", rp.as_result()); + + let micros = now.elapsed().as_micros(); + log::debug!( + target: "rpc_metrics", + "[{transport_label}] {} call took {} ΞΌs", + req.method_name(), + micros, + ); + self.calls_time + .with_label_values(&[ + transport_label, + req.method_name(), + if is_rate_limited { "true" } else { "false" }, + ]) + .observe(micros as _); + self.calls_finished + .with_label_values(&[ + transport_label, + req.method_name(), + // the label "is_error", so `success` should be regarded as false + // and vice-versa to be registrered correctly. + if rp.is_success() { "false" } else { "true" }, + if is_rate_limited { "true" } else { "false" }, + ]) + .inc(); } } -/// Response future for metrics. -#[pin_project] -pub struct ResponseFuture<'a, F> { - #[pin] - fut: F, - metrics: RpcMetrics, - req: Request<'a>, - now: Instant, - transport_label: &'static str, +/// Metrics with transport label. +#[derive(Clone, Debug)] +pub struct Metrics { + pub(crate) inner: RpcMetrics, + pub(crate) transport_label: &'static str, } -impl<'a, F> std::fmt::Debug for ResponseFuture<'a, F> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("ResponseFuture") +impl Metrics { + /// Create a new [`Metrics`]. 
+ pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { + Self { inner: metrics, transport_label } } -} -impl<'a, F: Future> Future for ResponseFuture<'a, F> { - type Output = F::Output; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); + pub(crate) fn ws_connect(&self) { + self.inner.ws_connect(); + } - let res = this.fut.poll(cx); - if let Poll::Ready(rp) = &res { - let method_name = this.req.method_name(); - let transport_label = &this.transport_label; - let now = this.now; - let metrics = &this.metrics; + pub(crate) fn ws_disconnect(&self, now: Instant) { + self.inner.ws_disconnect(now) + } - log::trace!(target: "rpc_metrics", "[{transport_label}] on_response started_at={:?}", now); - log::trace!(target: "rpc_metrics::extra", "[{transport_label}] result={:?}", rp); + pub(crate) fn on_call(&self, req: &Request) { + self.inner.on_call(req, self.transport_label) + } - let micros = now.elapsed().as_micros(); - log::debug!( - target: "rpc_metrics", - "[{transport_label}] {method_name} call took {} ΞΌs", - micros, - ); - metrics - .calls_time - .with_label_values(&[transport_label, method_name]) - .observe(micros as _); - metrics - .calls_finished - .with_label_values(&[ - transport_label, - method_name, - // the label "is_error", so `success` should be regarded as false - // and vice-versa to be registrered correctly. - if rp.is_success() { "false" } else { "true" }, - ]) - .inc(); - } - res + pub(crate) fn on_response( + &self, + req: &Request, + rp: &MethodResponse, + is_rate_limited: bool, + now: Instant, + ) { + self.inner.on_response(req, rp, is_rate_limited, self.transport_label, now) } } diff --git a/substrate/client/rpc-servers/src/middleware/mod.rs b/substrate/client/rpc-servers/src/middleware/mod.rs index cac516913d3..88ed8b2f433 100644 --- a/substrate/client/rpc-servers/src/middleware/mod.rs +++ b/substrate/client/rpc-servers/src/middleware/mod.rs @@ -18,10 +18,131 @@ //! JSON-RPC specific middleware. -/// Grafana metrics middleware. -pub mod metrics; -/// Rate limit middleware. -pub mod rate_limit; +use std::{ + num::NonZeroU32, + time::{Duration, Instant}, +}; + +use futures::future::{BoxFuture, FutureExt}; +use governor::{clock::Clock, Jitter}; +use jsonrpsee::{ + server::middleware::rpc::RpcServiceT, + types::{ErrorObject, Id, Request}, + MethodResponse, +}; + +mod metrics; +mod rate_limit; pub use metrics::*; pub use rate_limit::*; + +const MAX_JITTER: Duration = Duration::from_millis(50); +const MAX_RETRIES: usize = 10; + +/// JSON-RPC middleware layer. +#[derive(Debug, Clone, Default)] +pub struct MiddlewareLayer { + rate_limit: Option, + metrics: Option, +} + +impl MiddlewareLayer { + /// Create an empty MiddlewareLayer. + pub fn new() -> Self { + Self::default() + } + + /// Enable new rate limit middleware enforced per minute. + pub fn with_rate_limit_per_minute(self, n: NonZeroU32) -> Self { + Self { rate_limit: Some(RateLimit::per_minute(n)), metrics: self.metrics } + } + + /// Enable metrics middleware. + pub fn with_metrics(self, metrics: Metrics) -> Self { + Self { rate_limit: self.rate_limit, metrics: Some(metrics) } + } + + /// Register a new websocket connection. + pub fn ws_connect(&self) { + self.metrics.as_ref().map(|m| m.ws_connect()); + } + + /// Register that a websocket connection was closed. 
+ pub fn ws_disconnect(&self, now: Instant) { + self.metrics.as_ref().map(|m| m.ws_disconnect(now)); + } +} + +impl tower::Layer for MiddlewareLayer { + type Service = Middleware; + + fn layer(&self, service: S) -> Self::Service { + Middleware { service, rate_limit: self.rate_limit.clone(), metrics: self.metrics.clone() } + } +} + +/// JSON-RPC middleware that handles metrics +/// and rate-limiting. +/// +/// These are part of the same middleware +/// because the metrics needs to know whether +/// a call was rate-limited or not because +/// it will impact the roundtrip for a call. +pub struct Middleware { + service: S, + rate_limit: Option, + metrics: Option, +} + +impl<'a, S> RpcServiceT<'a> for Middleware +where + S: Send + Sync + RpcServiceT<'a> + Clone + 'static, +{ + type Future = BoxFuture<'a, MethodResponse>; + + fn call(&self, req: Request<'a>) -> Self::Future { + let now = Instant::now(); + + self.metrics.as_ref().map(|m| m.on_call(&req)); + + let service = self.service.clone(); + let rate_limit = self.rate_limit.clone(); + let metrics = self.metrics.clone(); + + async move { + let mut is_rate_limited = false; + + if let Some(limit) = rate_limit.as_ref() { + let mut attempts = 0; + let jitter = Jitter::up_to(MAX_JITTER); + + loop { + if attempts >= MAX_RETRIES { + return reject_too_many_calls(req.id); + } + + if let Err(rejected) = limit.inner.check() { + tokio::time::sleep(jitter + rejected.wait_time_from(limit.clock.now())) + .await; + } else { + break; + } + + is_rate_limited = true; + attempts += 1; + } + } + + let rp = service.call(req.clone()).await; + metrics.as_ref().map(|m| m.on_response(&req, &rp, is_rate_limited, now)); + + rp + } + .boxed() + } +} + +fn reject_too_many_calls(id: Id) -> MethodResponse { + MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) +} diff --git a/substrate/client/rpc-servers/src/middleware/rate_limit.rs b/substrate/client/rpc-servers/src/middleware/rate_limit.rs index cdcc3ebf66f..23335eb3768 100644 --- a/substrate/client/rpc-servers/src/middleware/rate_limit.rs +++ b/substrate/client/rpc-servers/src/middleware/rate_limit.rs @@ -16,92 +16,32 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! RPC rate limiting middleware. +//! RPC rate limit. -use std::{num::NonZeroU32, sync::Arc, time::Duration}; - -use futures::future::{BoxFuture, FutureExt}; use governor::{ - clock::{Clock, DefaultClock, QuantaClock}, + clock::{DefaultClock, QuantaClock}, middleware::NoOpMiddleware, state::{InMemoryState, NotKeyed}, - Jitter, -}; -use jsonrpsee::{ - server::middleware::rpc::RpcServiceT, - types::{ErrorObject, Id, Request}, - MethodResponse, + Quota, }; +use std::{num::NonZeroU32, sync::Arc}; type RateLimitInner = governor::RateLimiter; -const MAX_JITTER: Duration = Duration::from_millis(50); -const MAX_RETRIES: usize = 10; - -/// JSON-RPC rate limit middleware layer. +/// Rate limit. #[derive(Debug, Clone)] -pub struct RateLimitLayer(governor::Quota); - -impl RateLimitLayer { - /// Create new rate limit enforced per minute. 
- pub fn per_minute(n: NonZeroU32) -> Self { - Self(governor::Quota::per_minute(n)) - } +pub struct RateLimit { + pub(crate) inner: Arc, + pub(crate) clock: QuantaClock, } -/// JSON-RPC rate limit middleware -pub struct RateLimit { - service: S, - rate_limit: Arc, - clock: QuantaClock, -} - -impl tower::Layer for RateLimitLayer { - type Service = RateLimit; - - fn layer(&self, service: S) -> Self::Service { +impl RateLimit { + /// Create a new `RateLimit` per minute. + pub fn per_minute(n: NonZeroU32) -> Self { let clock = QuantaClock::default(); - RateLimit { - service, - rate_limit: Arc::new(RateLimitInner::direct_with_clock(self.0, &clock)), + Self { + inner: Arc::new(RateLimitInner::direct_with_clock(Quota::per_minute(n), &clock)), clock, } } } - -impl<'a, S> RpcServiceT<'a> for RateLimit -where - S: Send + Sync + RpcServiceT<'a> + Clone + 'static, -{ - type Future = BoxFuture<'a, MethodResponse>; - - fn call(&self, req: Request<'a>) -> Self::Future { - let service = self.service.clone(); - let rate_limit = self.rate_limit.clone(); - let clock = self.clock.clone(); - - async move { - let mut attempts = 0; - let jitter = Jitter::up_to(MAX_JITTER); - - loop { - if attempts >= MAX_RETRIES { - break reject_too_many_calls(req.id); - } - - if let Err(rejected) = rate_limit.check() { - tokio::time::sleep(jitter + rejected.wait_time_from(clock.now())).await; - } else { - break service.call(req).await; - } - - attempts += 1; - } - } - .boxed() - } -} - -fn reject_too_many_calls(id: Id) -> MethodResponse { - MethodResponse::error(id, ErrorObject::owned(-32999, "RPC rate limit exceeded", None::<()>)) -} -- GitLab From 601b77d9986d551ecfb2c09de9e75384f055cda4 Mon Sep 17 00:00:00 2001 From: philoniare Date: Tue, 5 Mar 2024 18:46:56 +0800 Subject: [PATCH 41/86] [Documentation] Delete unused outdated `polkadot/testing.md` (#3559) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description *Deletes `testing.md` file in accordance with the discussion on issue #2527.* Old references to Gurke or simnet have been removed. Fixes #2527 --------- Co-authored-by: Bastian KΓΆcher --- polkadot/doc/testing.md | 302 ---------------------------------------- 1 file changed, 302 deletions(-) delete mode 100644 polkadot/doc/testing.md diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md deleted file mode 100644 index 76703b1b439..00000000000 --- a/polkadot/doc/testing.md +++ /dev/null @@ -1,302 +0,0 @@ -# Testing - -Testing is an essential tool to assure correctness. This document describes how we test the Polkadot code, whether -locally, at scale, and/or automatically in CI. - -## Scopes - -The testing strategy for Polkadot is 4-fold: - -### Unit testing (1) - -Boring, small scale correctness tests of individual functions. It is usually -enough to run `cargo test` in the crate you are testing. - -For full coverage you may have to pass some additional features. For example: - -```sh -cargo test --features ci-only-tests -``` - -### Integration tests - -There are the following variants of integration tests: - -#### Subsystem tests (2) - -One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and -outgoing messages of the subsystem under test. See e.g. the `statement-distribution` tests. - -#### Behavior tests (3) - -Launching small scale networks, with multiple adversarial nodes. This should include tests around the thresholds in -order to evaluate the error handling once certain assumed invariants fail. 
- -Currently, we commonly use **zombienet** to run mini test-networks, whether locally or in CI. To run on your machine: - -- First, make sure you have [zombienet][zombienet] installed. - -- Now, all the required binaries must be installed in your $PATH. You must run the following from the `polkadot/` -directory in order to test your changes. (Not `zombienet setup`, or you will get the released binaries without your -local changes!) - -```sh -cargo install --path . --locked -``` - -- You will also need to install whatever binaries are required for your specific tests. For example, to install -`undying-collator`, from `polkadot/`, run: - -```sh -cargo install --path ./parachain/test-parachains/undying/collator --locked -``` - -- Finally, run the zombienet test from the `polkadot` directory: - -```sh -RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml -``` - -- You can pick a validator node like `alice` from the output and view its logs -(`tail -f `) or metrics. Make sure there is nothing funny in the logs -(try `grep WARN `). - -#### Testing at scale (4) - -Launching many nodes with configurable network speed and node features in a cluster of nodes. At this scale the -[Simnet][simnet] comes into play which launches a full cluster of nodes. The scale is handled by spawning a kubernetes -cluster and the meta description is covered by [Gurke][Gurke]. Asserts are made using Grafana rules, based on the -existing prometheus metrics. This can be extended by adding an additional service translating `jaeger` spans into -addition prometheus avoiding additional Polkadot source changes. - -_Behavior tests_ and _testing at scale_ have naturally soft boundary. The most significant difference is the presence of -a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. - -## Observing Logs - -To verify expected behavior it's often useful to observe logs. To avoid too many -logs at once, you can run one test at a time: - -1. Add `sp_tracing::try_init_simple();` to the beginning of a test -2. Specify `RUST_LOG=::=trace` before the cargo command. - -For example: - -```sh -RUST_LOG=parachain::pvf=trace cargo test execute_can_run_serially -``` - -For more info on how our logs work, check [the docs][logs]. - -## Coverage - -Coverage gives a _hint_ of the actually covered source lines by tests and test applications. - -The state of the art is currently tarpaulin which unfortunately yields a lot of false negatives. Lines that -are in fact covered, marked as uncovered due to a mere linebreak in a statement can cause these artifacts. This leads to -lower coverage percentages than there actually is. - -Since late 2020 rust has gained [MIR based coverage tooling]( -https://blog.rust-lang.org/inside-rust/2020/11/12/source-based-code-coverage.html). 
- -```sh -# setup -rustup component add llvm-tools-preview -cargo install grcov miniserve - -export CARGO_INCREMENTAL=0 -# wasm is not happy with the instrumentation -export SKIP_BUILD_WASM=true -export BUILD_DUMMY_WASM_BINARY=true -# the actully collected coverage data -export LLVM_PROFILE_FILE="llvmcoveragedata-%p-%m.profraw" -# build wasm without instrumentation -export WASM_TARGET_DIRECTORY=/tmp/wasm -cargo +nightly build -# required rust flags -export RUSTFLAGS="-Zinstrument-coverage" -# assure target dir is clean -rm -r target/{debug,tests} -# run tests to get coverage data -cargo +nightly test --all - -# create the *html* report out of all the test binaries -# mostly useful for local inspection -grcov . --binary-path ./target/debug -s . -t html --branch --ignore-not-existing -o ./coverage/ -miniserve -r ./coverage - -# create a *codecov* compatible report -grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info -``` - -The test coverage in `lcov` can the be published to . - -```sh -bash <(curl -s https://codecov.io/bash) -f lcov.info -``` - -or just printed as part of the PR using a github action i.e. -[`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter). - -For full examples on how to use [`grcov` /w Polkadot specifics see the github -repo](https://github.com/mozilla/grcov#coverallscodecov-output). - -## Fuzzing - -Fuzzing is an approach to verify correctness against arbitrary or partially structured inputs. - -Currently implemented fuzzing targets: - -- `erasure-coding` - -The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a -positive feature when run as part of PRs. - -Fuzzing is generally not applicable for data secured by cryptographic hashes or signatures. Either the input has to be -specifically crafted, such that the discarded input percentage stays in an acceptable range. System level fuzzing is -hence simply not feasible due to the amount of state that is required. - -Other candidates to implement fuzzing are: - -- `rpc` -- ... - -## Performance metrics - -There are various ways of performance metrics. - -- timing with `criterion` -- cache hits/misses w/ `iai` harness or `criterion-perf` -- `coz` a performance based compiler - -Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit -tests. - -`coz` is meant for runtime. In our case, the system is far too large to yield a sufficient number of measurements in -finite time. An alternative approach could be to record incoming package streams per subsystem and store dumps of them, -which in return could be replayed repeatedly at an accelerated speed, with which enough metrics could be obtained to -yield information on which areas would improve the metrics. This unfortunately will not yield much information, since -most if not all of the subsystem code is linear based on the input to generate one or multiple output messages, it is -unlikely to get any useful metrics without mocking a sufficiently large part of the other subsystem which overlaps with -[#Integration tests] which is unfortunately not repeatable as of now. As such the effort gain seems low and this is not -pursued at the current time. 
- -## Writing small scope integration tests with preconfigured workers - -Requirements: - -- spawn nodes with preconfigured behaviors -- allow multiple types of configuration to be specified -- allow extendability via external crates -- ... - ---- - -## Implementation of different behavior strain nodes - -### Goals - -The main goals are is to allow creating a test node which exhibits a certain behavior by utilizing a subset of _wrapped_ -or _replaced_ subsystems easily. The runtime must not matter at all for these tests and should be simplistic. The -execution must be fast, this mostly means to assure a close to zero network latency as well as shorting the block time -and epoch times down to a few `100ms` and a few dozend blocks per epoch. - -### Approach - -#### MVP - -A simple small scale builder pattern would suffice for stage one implementation of allowing to replace individual -subsystems. An alternative would be to harness the existing `AllSubsystems` type and replace the subsystems as needed. - -#### Full `proc-macro` implementation - -`Overseer` is a common pattern. It could be extracted as `proc` macro and generative `proc-macro`. This would replace -the `AllSubsystems` type as well as implicitly create the `AllMessages` enum as `AllSubsystemsGen` does today. - -The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) -for details. - -##### Declare an overseer implementation - -```rust -struct BehaveMaleficient; - -impl OverseerGen for BehaveMaleficient { - fn generate<'a, Spawner, RuntimeClient>( - &self, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, - ) -> Result<(Overseer>, OverseerHandler), Error> - where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, - Spawner: 'static + overseer::gen::Spawner + Clone + Unpin, - { - let spawner = args.spawner.clone(); - let leaves = args.leaves.clone(); - let runtime_client = args.runtime_client.clone(); - let registry = args.registry.clone(); - let candidate_validation_config = args.candidate_validation_config.clone(); - // modify the subsystem(s) as needed: - let all_subsystems = create_default_subsystems(args)?. - // or spawn an entirely new set - - replace_candidate_validation( - // create the filtered subsystem - FilteredSubsystem::new( - CandidateValidationSubsystem::with_config( - candidate_validation_config, - Metrics::register(registry)?, - ), - // an implementation of - Skippy::default(), - ), - ); - - Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner) - .map_err(|e| e.into()) - - // A builder pattern will simplify this further - // WIP https://github.com/paritytech/polkadot/pull/2962 - } -} - -fn main() -> eyre::Result<()> { - color_eyre::install()?; - let cli = Cli::from_args(); - assert_matches::assert_matches!(cli.subcommand, None); - polkadot_cli::run_node(cli, BehaveMaleficient)?; - Ok(()) -} -``` - -[`variant-a`](../node/malus/src/variant-a.rs) is a fully working example. - -#### Simnet - -Spawn a kubernetes cluster based on a meta description using [Gurke] with the [Simnet] scripts. - -Coordinated attacks of multiple nodes or subsystems must be made possible via a side-channel, that is out of scope for -this document. - -The individual node configurations are done as targets with a particular builder configuration. 
- -#### Behavior tests w/o Simnet - -Commonly this will require multiple nodes, and most machines are limited to running two or three nodes concurrently. -Hence, this is not the common case and is just an implementation _idea_. - -```rust -behavior_testcase!{ -"TestRuntime" => -"Alice": , -"Bob": , -"Charles": Default, -"David": "Charles", -"Eve": "Bob", -} -``` - -[zombienet]: https://github.com/paritytech/zombienet -[Gurke]: https://github.com/paritytech/gurke -[simnet]: https://github.com/paritytech/simnet_scripts -[logs]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/node/gum/src/lib.rs -- GitLab From 3ff78a7888b859af7b4c5ea7247b3ddd0895728d Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 5 Mar 2024 12:04:05 +0100 Subject: [PATCH 42/86] Increase 0002-validators-warp-sync timeout (#3575) Fixes failures like: https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/5436619#L3319 --- .../0002-validators-warp-sync/test-validators-warp-sync.zndsl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl index ea0f15e5d8a..b68bce508c0 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl @@ -34,8 +34,8 @@ bob: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds alice: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds bob: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds -alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds -bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 60 seconds +alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds +bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds alice: count of log lines containing "error" is 0 within 10 seconds bob: count of log lines containing "verification failed" is 0 within 10 seconds -- GitLab From a71f018c7a6a7374d929e54b3466f1bd9bdd9de8 Mon Sep 17 00:00:00 2001 From: Matteo Muraca <56828990+muraca@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:04:11 +0100 Subject: [PATCH 43/86] Removed `pallet::getter` usage from `pallet-collective` (#3456) Part of #3326 This one is easier as all the storage items are public. 
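For downstream code the change is mechanical: every call that went through a removed `pallet::getter` becomes a direct read of the (already public) storage item, naming the pallet instance explicitly. A rough before/after sketch, mirroring the `impls.rs` hunks further down in this patch (the concrete `Runtime`/`AllianceCollective` types come from that runtime and are only illustrative here):

```rust
// Before: via the macro-generated getter, which this PR removes.
let proposal = AllianceMotion::proposal_of(proposal_hash);

// After: read the public storage item directly, parameterised by the
// runtime and the collective instance it is configured with.
let proposal = pallet_collective::ProposalOf::<Runtime, AllianceCollective>::get(proposal_hash);
```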
@ggwpez @kianenigma @shawntabrizi

---------

Signed-off-by: Matteo Muraca
Co-authored-by: Liam Aharon
Co-authored-by: command-bot <>
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 .../collectives-westend/src/impls.rs          |  2 +-
 prdoc/pr_3456.prdoc                           | 15 ++++
 substrate/bin/node/runtime/src/impls.rs       |  8 +--
 substrate/frame/alliance/src/mock.rs          |  2 +-
 substrate/frame/alliance/src/tests.rs         |  4 +-
 .../frame/collective/src/benchmarking.rs      | 32 ++++-----
 substrate/frame/collective/src/lib.rs         | 72 +++++++++----------
 substrate/frame/collective/src/tests.rs       | 44 ++++++------
 8 files changed, 94 insertions(+), 85 deletions(-)
 create mode 100644 prdoc/pr_3456.prdoc

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs
index caf0cddec66..e5b176fc778 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs
@@ -81,7 +81,7 @@ where
 	}
 
 	fn proposal_of(proposal_hash: HashOf<T>) -> Option<ProposalOf<T, I>> {
-		pallet_collective::Pallet::<T, I>::proposal_of(proposal_hash)
+		pallet_collective::ProposalOf::<T, I>::get(proposal_hash)
 	}
 }
 
diff --git a/prdoc/pr_3456.prdoc b/prdoc/pr_3456.prdoc
new file mode 100644
index 00000000000..c7327e17e57
--- /dev/null
+++ b/prdoc/pr_3456.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from `pallet-collective`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes `pallet::getter` usage from `pallet-collective`, and updates dependent code accordingly.
+      The syntax `StorageItem::<T, I>::get()` should be used instead.
+ +crates: + - name: pallet-collective + - name: kitchensink-runtime + - name: collectives-westend-runtime diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 7ff52a758b3..34f043b33a4 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -30,8 +30,8 @@ use pallet_identity::legacy::IdentityField; use sp_std::prelude::*; use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, - RuntimeCall, + AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Hash, + NegativeImbalance, Runtime, RuntimeCall, }; pub struct Author; @@ -107,7 +107,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: Hash) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } @@ -276,7 +276,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); fm = next; if fm == min_multiplier() { - break + break; } iterations += 1; } diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 4a65485ed8f..fd44d33ef93 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -208,7 +208,7 @@ impl ProposalProvider for AllianceProposalProvider } fn proposal_of(proposal_hash: H256) -> Option { - AllianceMotion::proposal_of(proposal_hash) + pallet_collective::ProposalOf::::get(proposal_hash) } } diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index 8011627b237..710de5a54bc 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -187,8 +187,8 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*AllianceMotion::proposals(), vec![hash]); - assert_eq!(AllianceMotion::proposal_of(&hash), Some(proposal)); + assert_eq!(*pallet_collective::Proposals::::get(), vec![hash]); + assert_eq!(pallet_collective::ProposalOf::::get(&hash), Some(proposal)); assert_eq!( System::events(), vec![EventRecord { diff --git a/substrate/frame/collective/src/benchmarking.rs b/substrate/frame/collective/src/benchmarking.rs index af10eae5b67..7b5df17b60a 100644 --- a/substrate/frame/collective/src/benchmarking.rs +++ b/substrate/frame/collective/src/benchmarking.rs @@ -105,7 +105,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get()) verify { new_members.sort(); - assert_eq!(Collective::::members(), new_members); + assert_eq!(Members::::get(), new_members); } execute { @@ -199,14 +199,14 @@ benchmarks_instance_pallet! { )?; } - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); let proposal: T::Proposal = SystemCall::::remark { remark: id_to_remark_data(p, b as usize) }.into(); }: propose(SystemOrigin::Signed(caller.clone()), threshold, Box::new(proposal.clone()), bytes_in_storage) verify { // New proposal is recorded - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); let proposal_hash = T::Hashing::hash_of(&proposal); assert_last_event::(Event::Proposed { account: caller, proposal_index: p - 1, proposal_hash, threshold }.into()); } @@ -269,7 +269,7 @@ benchmarks_instance_pallet! 
{ approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; @@ -280,8 +280,8 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(voter), last_hash, index, approve) verify { // All proposals exist and the last proposal has just been updated. - assert_eq!(Collective::::proposals().len(), p as usize); - let voting = Collective::::voting(&last_hash).ok_or("Proposal Missing")?; + assert_eq!(Proposals::::get().len(), p as usize); + let voting = Voting::::get(&last_hash).ok_or("Proposal Missing")?; assert_eq!(voting.ayes.len(), (m - 3) as usize); assert_eq!(voting.nays.len(), 1); } @@ -344,7 +344,7 @@ benchmarks_instance_pallet! { approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Voter switches vote to nay, which kills the vote let approve = false; @@ -361,7 +361,7 @@ benchmarks_instance_pallet! { }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } @@ -428,7 +428,7 @@ benchmarks_instance_pallet! { true, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Caller switches vote to aye, which passes the vote let index = p - 1; @@ -442,7 +442,7 @@ benchmarks_instance_pallet! { }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } @@ -519,12 +519,12 @@ benchmarks_instance_pallet! { )?; System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime nay will close it as disapproved }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } @@ -591,12 +591,12 @@ benchmarks_instance_pallet! { // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); // Prime aye will close it as approved }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Ok(()) }.into()); } @@ -640,11 +640,11 @@ benchmarks_instance_pallet! 
{ } System::::set_block_number(BlockNumberFor::::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Proposals::::get().len(), p as usize); }: _(SystemOrigin::Root, last_hash) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Proposals::::get().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index dd481b53681..882e99a6d00 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -261,36 +261,30 @@ pub mod pallet { /// The hashes of the active proposals. #[pallet::storage] - #[pallet::getter(fn proposals)] pub type Proposals, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; /// Actual proposal for a given hash, if it's current. #[pallet::storage] - #[pallet::getter(fn proposal_of)] pub type ProposalOf, I: 'static = ()> = StorageMap<_, Identity, T::Hash, >::Proposal, OptionQuery>; /// Votes on a given proposal, if it is ongoing. #[pallet::storage] - #[pallet::getter(fn voting)] pub type Voting, I: 'static = ()> = StorageMap<_, Identity, T::Hash, Votes>, OptionQuery>; /// Proposals so far. #[pallet::storage] - #[pallet::getter(fn proposal_count)] pub type ProposalCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; /// The current members of the collective. This is stored sorted (just by value). #[pallet::storage] - #[pallet::getter(fn members)] pub type Members, I: 'static = ()> = StorageValue<_, Vec, ValueQuery>; /// The prime member that helps determine the default vote behavior in case of absentations. #[pallet::storage] - #[pallet::getter(fn prime)] pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; #[pallet::event] @@ -459,7 +453,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); @@ -519,7 +513,7 @@ pub mod pallet { #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); if threshold < 2 { @@ -565,7 +559,7 @@ pub mod pallet { approve: bool, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let members = Self::members(); + let members = Members::::get(); ensure!(members.contains(&who), Error::::NotMember); // Detects first vote of the member in the motion @@ -669,7 +663,7 @@ impl, I: 'static> Pallet { pub fn is_member(who: &T::AccountId) -> bool { // Note: The dispatchables *do not* use this to check membership so make sure // to update those if this is changed. - Self::members().contains(who) + Members::::get().contains(who) } /// Execute immediately when adding a new proposal. 
@@ -688,7 +682,7 @@ impl, I: 'static> Pallet { let proposal_hash = T::Hashing::hash_of(&proposal); ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let result = proposal.dispatch(RawOrigin::Members(1, seats).into()); Self::deposit_event(Event::Executed { proposal_hash, @@ -721,7 +715,7 @@ impl, I: 'static> Pallet { Ok(proposals.len()) })?; - let index = Self::proposal_count(); + let index = ProposalCount::::get(); >::mutate(|i| *i += 1); >::insert(proposal_hash, proposal); let votes = { @@ -747,7 +741,7 @@ impl, I: 'static> Pallet { index: ProposalIndex, approve: bool, ) -> Result { - let mut voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + let mut voting = Voting::::get(&proposal).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let position_yes = voting.ayes.iter().position(|a| a == &who); @@ -798,12 +792,12 @@ impl, I: 'static> Pallet { proposal_weight_bound: Weight, length_bound: u32, ) -> DispatchResultWithPostInfo { - let voting = Self::voting(&proposal_hash).ok_or(Error::::ProposalMissing)?; + let voting = Voting::::get(&proposal_hash).ok_or(Error::::ProposalMissing)?; ensure!(voting.index == index, Error::::WrongIndex); let mut no_votes = voting.nays.len() as MemberCount; let mut yes_votes = voting.ayes.len() as MemberCount; - let seats = Self::members().len() as MemberCount; + let seats = Members::::get().len() as MemberCount; let approved = yes_votes >= voting.threshold; let disapproved = seats.saturating_sub(no_votes) < voting.threshold; // Allow (dis-)approving the proposal as soon as there are enough votes. @@ -837,7 +831,7 @@ impl, I: 'static> Pallet { // Only allow actual closing of the proposal after the voting period has ended. ensure!(frame_system::Pallet::::block_number() >= voting.end, Error::::TooEarly); - let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); + let prime_vote = Prime::::get().map(|who| voting.ayes.iter().any(|a| a == &who)); // default voting strategy. let default = T::DefaultVote::default_vote(prime_vote, yes_votes, no_votes, seats); @@ -978,29 +972,28 @@ impl, I: 'static> Pallet { /// * The prime account must be a member of the collective. #[cfg(any(feature = "try-runtime", test))] fn do_try_state() -> Result<(), TryRuntimeError> { - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { ensure!( - Self::proposal_of(proposal).is_some(), + ProposalOf::::get(proposal).is_some(), "Proposal hash from `Proposals` is not found inside the `ProposalOf` mapping." 
); Ok(()) - })?; + }, + )?; ensure!( - Self::proposals().into_iter().count() <= Self::proposal_count() as usize, + Proposals::::get().into_iter().count() <= ProposalCount::::get() as usize, "The actual number of proposals is greater than `ProposalCount`" ); ensure!( - Self::proposals().into_iter().count() == >::iter_keys().count(), + Proposals::::get().into_iter().count() == >::iter_keys().count(), "Proposal count inside `Proposals` is not equal to the proposal count in `ProposalOf`" ); - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let ayes = votes.ayes.len(); let nays = votes.nays.len(); @@ -1010,13 +1003,13 @@ impl, I: 'static> Pallet { ); } Ok(()) - })?; + }, + )?; let mut proposal_indices = vec![]; - Self::proposals() - .into_iter() - .try_for_each(|proposal| -> Result<(), TryRuntimeError> { - if let Some(votes) = Self::voting(proposal) { + Proposals::::get().into_iter().try_for_each( + |proposal| -> Result<(), TryRuntimeError> { + if let Some(votes) = Voting::::get(proposal) { let proposal_index = votes.index; ensure!( !proposal_indices.contains(&proposal_index), @@ -1025,12 +1018,13 @@ impl, I: 'static> Pallet { proposal_indices.push(proposal_index); } Ok(()) - })?; + }, + )?; >::iter_keys().try_for_each( |proposal_hash| -> Result<(), TryRuntimeError> { ensure!( - Self::proposals().contains(&proposal_hash), + Proposals::::get().contains(&proposal_hash), "`Proposals` doesn't contain the proposal hash from the `Voting` storage map." ); Ok(()) @@ -1038,17 +1032,17 @@ impl, I: 'static> Pallet { )?; ensure!( - Self::members().len() <= T::MaxMembers::get() as usize, + Members::::get().len() <= T::MaxMembers::get() as usize, "The member count is greater than `MaxMembers`." ); ensure!( - Self::members().windows(2).all(|members| members[0] <= members[1]), + Members::::get().windows(2).all(|members| members[0] <= members[1]), "The members are not sorted by value." ); - if let Some(prime) = Self::prime() { - ensure!(Self::members().contains(&prime), "Prime account is not a member."); + if let Some(prime) = Prime::::get() { + ensure!(Members::::get().contains(&prime), "Prime account is not a member."); } Ok(()) @@ -1082,7 +1076,7 @@ impl, I: 'static> ChangeMembers for Pallet { // remove accounts from all current voting in motions. 
let mut outgoing = outgoing.to_vec(); outgoing.sort(); - for h in Self::proposals().into_iter() { + for h in Proposals::::get().into_iter() { >::mutate(h, |v| { if let Some(mut votes) = v.take() { votes.ayes = votes diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index ac5797c638c..92418794c5f 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -193,8 +193,8 @@ fn default_max_proposal_weight() -> Weight { #[test] fn motions_basic_environment_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(*Collective::proposals(), Vec::::new()); + assert_eq!(Members::::get(), vec![1, 2, 3]); + assert_eq!(*Proposals::::get(), Vec::::new()); }); } @@ -205,7 +205,7 @@ fn initialize_members_sorts_members() { ExtBuilder::default() .set_collective_members(unsorted_members) .build_and_execute(|| { - assert_eq!(Collective::members(), expected_members); + assert_eq!(Members::::get(), expected_members); }); } @@ -219,8 +219,8 @@ fn set_members_with_prime_works() { Some(3), MaxMembers::get() )); - assert_eq!(Collective::members(), members.clone()); - assert_eq!(Collective::prime(), Some(3)); + assert_eq!(Members::::get(), members.clone()); + assert_eq!(Prime::::get(), Some(3)); assert_noop!( Collective::set_members(RuntimeOrigin::root(), members, Some(4), MaxMembers::get()), Error::::PrimeAccountNotMember @@ -632,12 +632,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -653,12 +653,12 @@ fn removal_of_old_voters_votes_works() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); Collective::change_members_sorted(&[], &[3], &[2, 4]); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -680,7 +680,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); assert_ok!(Collective::set_members( @@ -690,7 +690,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); @@ -706,7 +706,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: 
vec![3], end }) ); assert_ok!(Collective::set_members( @@ -716,7 +716,7 @@ fn removal_of_old_voters_votes_works_with_set_members() { MaxMembers::get() )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); @@ -735,10 +735,10 @@ fn propose_works() { Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(&hash), Some(proposal)); + assert_eq!(*Proposals::::get(), vec![hash]); + assert_eq!(ProposalOf::::get(&hash), Some(proposal)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![], nays: vec![], end }) ); @@ -898,13 +898,13 @@ fn motions_vote_after_works() { )); // Initially there a no votes when the motion is proposed. assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // Cast first aye vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); // Try to cast a duplicate aye vote. @@ -915,7 +915,7 @@ fn motions_vote_after_works() { // Cast a nay vote. assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); // Try to cast a duplicate nay vote. @@ -966,7 +966,7 @@ fn motions_all_first_vote_free_works() { proposal_len, )); assert_eq!( - Collective::voting(&hash), + Voting::::get(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); @@ -1031,14 +1031,14 @@ fn motions_reproposing_disapproved_works() { proposal_weight, proposal_len )); - assert_eq!(*Collective::proposals(), vec![]); + assert_eq!(*Proposals::::get(), vec![]); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_eq!(*Collective::proposals(), vec![hash]); + assert_eq!(*Proposals::::get(), vec![hash]); }); } -- GitLab From 4c810609d6c72b48bd748398165da5c27a604778 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:40:37 +0000 Subject: [PATCH 44/86] Repot all templates into a single directory (#3460) The first step towards https://github.com/paritytech/polkadot-sdk/issues/3155 Brings all templates under the following structure ``` templates | parachain | | polkadot-launch | | runtime --> parachain-template-runtime | | pallets --> pallet-parachain-template | | node --> parachain-template-node | minimal | | runtime --> minimal-template-runtime | | pallets --> pallet-minimal-template | | node --> minimal-template-node | solochain | | runtime --> solochain-template-runtime | | pallets --> pallet-template (the naming is not consistent here) | | node --> solochain-template-node ``` The only note-worthy changes in this PR are: - More `Cargo.toml` fields are forwarded to use the one from the workspace. 
- parachain template now has weights and benchmarks - adds a shell pallet to the minimal template - remove a few unused deps A list of possible follow-ups: - [ ] Unify READMEs, create a parent README for all - [ ] remove references to `docs.substrate.io` in templates - [ ] make all templates use `#[derive_impl]` - [ ] update and unify all licenses - [ ] Remove polkadot launch, use https://github.com/paritytech/polkadot-sdk/blob/35349df993ea2e7c4769914ef5d199e787b23d4c/cumulus/zombienet/examples/small_network.toml instead. --- .github/scripts/check-workspace.py | 22 +- .github/workflows/check-licenses.yml | 1 - .gitlab/pipeline/build.yml | 2 +- Cargo.lock | 195 +++++++++--------- Cargo.toml | 21 +- .../pallets/template/README.md | 1 - .../pallets/template/src/benchmarking.rs | 20 -- prdoc/pr_3460.prdoc | 26 +++ substrate/bin/minimal/node/Cargo.toml | 60 ------ substrate/bin/minimal/runtime/Cargo.toml | 50 ----- substrate/bin/node-template/.editorconfig | 16 -- substrate/bin/node-template/env-setup/.envrc | 1 - substrate/bin/node-template/node/Cargo.toml | 93 --------- .../bin/node-template/runtime/Cargo.toml | 126 ----------- substrate/frame/Cargo.toml | 13 +- .../frame/examples/default-config/src/lib.rs | 2 +- substrate/frame/system/src/lib.rs | 2 +- templates/minimal/README.md | 0 templates/minimal/node/Cargo.toml | 62 ++++++ .../bin => templates}/minimal/node/build.rs | 0 .../minimal/node/src/chain_spec.rs | 0 .../bin => templates}/minimal/node/src/cli.rs | 0 .../minimal/node/src/command.rs | 0 .../bin => templates}/minimal/node/src/lib.rs | 2 +- .../minimal/node/src/main.rs | 0 .../bin => templates}/minimal/node/src/rpc.rs | 0 .../minimal/node/src/service.rs | 0 templates/minimal/pallets/template/Cargo.toml | 33 +++ templates/minimal/pallets/template/src/lib.rs | 19 ++ templates/minimal/runtime/Cargo.toml | 59 ++++++ .../minimal/runtime/build.rs | 0 .../minimal/runtime/src/lib.rs | 15 +- .../parachain}/LICENSE | 0 .../parachain}/README.md | 0 .../parachain}/node/Cargo.toml | 33 +-- .../parachain}/node/build.rs | 0 .../parachain}/node/src/chain_spec.rs | 13 +- .../parachain}/node/src/cli.rs | 0 .../parachain}/node/src/command.rs | 0 .../parachain}/node/src/main.rs | 0 .../parachain}/node/src/rpc.rs | 0 .../parachain}/node/src/service.rs | 0 .../parachain}/pallets/template/Cargo.toml | 30 +-- .../parachain}/pallets/template/README.md | 0 .../pallets/template/src/benchmarking.rs | 0 .../parachain}/pallets/template/src/lib.rs | 4 + .../parachain}/pallets/template/src/mock.rs | 1 + .../parachain}/pallets/template/src/tests.rs | 0 .../pallets/template/src/weights.rs | 0 .../parachain}/polkadot-launch/config.json | 0 .../parachain}/runtime/Cargo.toml | 49 +++-- .../parachain}/runtime/build.rs | 0 .../parachain}/runtime/src/lib.rs | 5 +- .../runtime/src/weights/block_weights.rs | 0 .../runtime/src/weights/extrinsic_weights.rs | 0 .../parachain}/runtime/src/weights/mod.rs | 0 .../runtime/src/weights/paritydb_weights.rs | 0 .../runtime/src/weights/rocksdb_weights.rs | 0 .../parachain}/runtime/src/xcm_config.rs | 0 .../solochain}/LICENSE | 0 .../solochain}/README.md | 0 .../solochain}/docs/rust-setup.md | 0 .../solochain}/env-setup/README.md | 0 .../solochain}/env-setup/flake.lock | 0 .../solochain}/env-setup/flake.nix | 0 .../solochain}/env-setup/rust-toolchain.toml | 0 templates/solochain/node/Cargo.toml | 92 +++++++++ .../solochain}/node/build.rs | 0 .../solochain}/node/src/benchmarking.rs | 2 +- .../solochain}/node/src/chain_spec.rs | 2 +- .../solochain}/node/src/cli.rs | 0 
.../solochain}/node/src/command.rs | 2 +- .../solochain}/node/src/main.rs | 0 .../solochain}/node/src/rpc.rs | 2 +- .../solochain}/node/src/service.rs | 2 +- .../solochain}/pallets/template/Cargo.toml | 30 +-- .../solochain/pallets/template/README.md | 1 + .../pallets/template/src/benchmarking.rs | 35 ++++ .../solochain}/pallets/template/src/lib.rs | 0 .../solochain}/pallets/template/src/mock.rs | 0 .../solochain}/pallets/template/src/tests.rs | 0 .../solochain/pallets/template/src/weights.rs | 90 ++++++++ templates/solochain/runtime/Cargo.toml | 152 ++++++++++++++ .../solochain}/runtime/build.rs | 0 .../solochain}/runtime/src/lib.rs | 9 +- 85 files changed, 819 insertions(+), 576 deletions(-) delete mode 100644 cumulus/parachain-template/pallets/template/README.md delete mode 100644 cumulus/parachain-template/pallets/template/src/benchmarking.rs create mode 100644 prdoc/pr_3460.prdoc delete mode 100644 substrate/bin/minimal/node/Cargo.toml delete mode 100644 substrate/bin/minimal/runtime/Cargo.toml delete mode 100644 substrate/bin/node-template/.editorconfig delete mode 100644 substrate/bin/node-template/env-setup/.envrc delete mode 100644 substrate/bin/node-template/node/Cargo.toml delete mode 100644 substrate/bin/node-template/runtime/Cargo.toml create mode 100644 templates/minimal/README.md create mode 100644 templates/minimal/node/Cargo.toml rename {substrate/bin => templates}/minimal/node/build.rs (100%) rename {substrate/bin => templates}/minimal/node/src/chain_spec.rs (100%) rename {substrate/bin => templates}/minimal/node/src/cli.rs (100%) rename {substrate/bin => templates}/minimal/node/src/command.rs (100%) rename {substrate/bin => templates}/minimal/node/src/lib.rs (94%) rename {substrate/bin => templates}/minimal/node/src/main.rs (100%) rename {substrate/bin => templates}/minimal/node/src/rpc.rs (100%) rename {substrate/bin => templates}/minimal/node/src/service.rs (100%) create mode 100644 templates/minimal/pallets/template/Cargo.toml create mode 100644 templates/minimal/pallets/template/src/lib.rs create mode 100644 templates/minimal/runtime/Cargo.toml rename {substrate/bin => templates}/minimal/runtime/build.rs (100%) rename {substrate/bin => templates}/minimal/runtime/src/lib.rs (95%) rename {cumulus/parachain-template => templates/parachain}/LICENSE (100%) rename {cumulus/parachain-template => templates/parachain}/README.md (100%) rename {cumulus/parachain-template => templates/parachain}/node/Cargo.toml (78%) rename {cumulus/parachain-template => templates/parachain}/node/build.rs (100%) rename {cumulus/parachain-template => templates/parachain}/node/src/chain_spec.rs (93%) rename {cumulus/parachain-template => templates/parachain}/node/src/cli.rs (100%) rename {cumulus/parachain-template => templates/parachain}/node/src/command.rs (100%) rename {cumulus/parachain-template => templates/parachain}/node/src/main.rs (100%) rename {cumulus/parachain-template => templates/parachain}/node/src/rpc.rs (100%) rename {cumulus/parachain-template => templates/parachain}/node/src/service.rs (100%) rename {cumulus/parachain-template => templates/parachain}/pallets/template/Cargo.toml (68%) rename {substrate/bin/node-template => templates/parachain}/pallets/template/README.md (100%) rename {substrate/bin/node-template => templates/parachain}/pallets/template/src/benchmarking.rs (100%) rename {cumulus/parachain-template => templates/parachain}/pallets/template/src/lib.rs (96%) rename {cumulus/parachain-template => templates/parachain}/pallets/template/src/mock.rs (98%) rename 
{cumulus/parachain-template => templates/parachain}/pallets/template/src/tests.rs (100%) rename {substrate/bin/node-template => templates/parachain}/pallets/template/src/weights.rs (100%) rename {cumulus/parachain-template => templates/parachain}/polkadot-launch/config.json (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/Cargo.toml (82%) rename {cumulus/parachain-template => templates/parachain}/runtime/build.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/lib.rs (99%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/weights/block_weights.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/weights/extrinsic_weights.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/weights/mod.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/weights/paritydb_weights.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/weights/rocksdb_weights.rs (100%) rename {cumulus/parachain-template => templates/parachain}/runtime/src/xcm_config.rs (100%) rename {substrate/bin/node-template => templates/solochain}/LICENSE (100%) rename {substrate/bin/node-template => templates/solochain}/README.md (100%) rename {substrate/bin/node-template => templates/solochain}/docs/rust-setup.md (100%) rename {substrate/bin/node-template => templates/solochain}/env-setup/README.md (100%) rename {substrate/bin/node-template => templates/solochain}/env-setup/flake.lock (100%) rename {substrate/bin/node-template => templates/solochain}/env-setup/flake.nix (100%) rename {substrate/bin/node-template => templates/solochain}/env-setup/rust-toolchain.toml (100%) create mode 100644 templates/solochain/node/Cargo.toml rename {substrate/bin/node-template => templates/solochain}/node/build.rs (100%) rename {substrate/bin/node-template => templates/solochain}/node/src/benchmarking.rs (99%) rename {substrate/bin/node-template => templates/solochain}/node/src/chain_spec.rs (97%) rename {substrate/bin/node-template => templates/solochain}/node/src/cli.rs (100%) rename {substrate/bin/node-template => templates/solochain}/node/src/command.rs (98%) rename {substrate/bin/node-template => templates/solochain}/node/src/main.rs (100%) rename {substrate/bin/node-template => templates/solochain}/node/src/rpc.rs (96%) rename {substrate/bin/node-template => templates/solochain}/node/src/service.rs (99%) rename {substrate/bin/node-template => templates/solochain}/pallets/template/Cargo.toml (55%) create mode 100644 templates/solochain/pallets/template/README.md create mode 100644 templates/solochain/pallets/template/src/benchmarking.rs rename {substrate/bin/node-template => templates/solochain}/pallets/template/src/lib.rs (100%) rename {substrate/bin/node-template => templates/solochain}/pallets/template/src/mock.rs (100%) rename {substrate/bin/node-template => templates/solochain}/pallets/template/src/tests.rs (100%) create mode 100644 templates/solochain/pallets/template/src/weights.rs create mode 100644 templates/solochain/runtime/Cargo.toml rename {substrate/bin/node-template => templates/solochain}/runtime/build.rs (100%) rename {substrate/bin/node-template => templates/solochain}/runtime/src/lib.rs (98%) diff --git a/.github/scripts/check-workspace.py b/.github/scripts/check-workspace.py index d200122fee9..1f8f103e4e1 100644 --- a/.github/scripts/check-workspace.py +++ b/.github/scripts/check-workspace.py @@ -18,7 +18,7 @@ def parse_args(): 
parser.add_argument('workspace_dir', help='The directory to check', metavar='workspace_dir', type=str, nargs=1) parser.add_argument('--exclude', help='Exclude crate paths from the check', metavar='exclude', type=str, nargs='*', default=[]) - + args = parser.parse_args() return (args.workspace_dir[0], args.exclude) @@ -26,7 +26,7 @@ def main(root, exclude): workspace_crates = get_members(root, exclude) all_crates = get_crates(root, exclude) print(f'πŸ“¦ Found {len(all_crates)} crates in total') - + check_duplicates(workspace_crates) check_missing(workspace_crates, all_crates) check_links(all_crates) @@ -48,14 +48,14 @@ def get_members(workspace_dir, exclude): if not 'members' in root_manifest['workspace']: return [] - + members = [] for member in root_manifest['workspace']['members']: if member in exclude: print(f'❌ Excluded member should not appear in the workspace {member}') sys.exit(1) members.append(member) - + return members # List all members of the workspace. @@ -74,12 +74,12 @@ def get_crates(workspace_dir, exclude_crates) -> dict: with open(path, "r") as f: content = f.read() manifest = toml.loads(content) - + if 'workspace' in manifest: if root != workspace_dir: print("⏩ Excluded recursive workspace at %s" % path) continue - + # Cut off the root path and the trailing /Cargo.toml. path = path[len(workspace_dir)+1:-11] name = manifest['package']['name'] @@ -87,7 +87,7 @@ def get_crates(workspace_dir, exclude_crates) -> dict: print("⏩ Excluded crate %s at %s" % (name, path)) continue crates[name] = (path, manifest) - + return crates # Check that there are no duplicate entries in the workspace. @@ -138,23 +138,23 @@ def check_links(all_crates): if not 'path' in deps[dep]: broken.append((name, dep_name, "crate must be linked via `path`")) return - + def check_crate(deps): to_checks = ['dependencies', 'dev-dependencies', 'build-dependencies'] for to_check in to_checks: if to_check in deps: check_deps(deps[to_check]) - + # There could possibly target dependant deps: if 'target' in manifest: # Target dependant deps can only have one level of nesting: for _, target in manifest['target'].items(): check_crate(target) - + check_crate(manifest) - + links.sort() broken.sort() diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index e1e92d288ce..c32b6fcf89e 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -42,5 +42,4 @@ jobs: shopt -s globstar npx @paritytech/license-scanner scan \ --ensure-licenses ${{ env.LICENSES }} \ - --exclude ./substrate/bin/node-template \ -- ./substrate/**/*.rs diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 423587c1fb5..f8de6135572 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -337,7 +337,7 @@ build-runtimes-polkavm: - .common-refs - .run-immediately script: - - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-runtime + - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p minimal-template-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p westend-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p rococo-runtime - SUBSTRATE_RUNTIME_TARGET=riscv cargo check -p polkadot-test-runtime diff --git a/Cargo.lock b/Cargo.lock index d262c1795a6..26e134448ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8172,15 +8172,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] -name = "minimal-node" -version = "4.0.0-dev" +name = 
"minimal-template-node" +version = "0.0.0" dependencies = [ "clap 4.5.1", "frame", "futures", "futures-timer", "jsonrpsee", - "minimal-runtime", + "minimal-template-runtime", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -8207,12 +8207,12 @@ dependencies = [ ] [[package]] -name = "minimal-runtime" -version = "0.1.0" +name = "minimal-template-runtime" +version = "0.0.0" dependencies = [ "frame", - "frame-support", "pallet-balances", + "pallet-minimal-template", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -8726,50 +8726,6 @@ dependencies = [ "kitchensink-runtime", ] -[[package]] -name = "node-template" -version = "4.0.0-dev" -dependencies = [ - "clap 4.5.1", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-system", - "futures", - "jsonrpsee", - "node-template-runtime", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-consensus-grandpa", - "sc-executor", - "sc-network", - "sc-offchain", - "sc-rpc-api", - "sc-service", - "sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-timestamp", - "substrate-build-script-utils", - "substrate-frame-rpc-system", - "try-runtime-cli", -] - [[package]] name = "node-template-release" version = "3.0.0" @@ -8784,45 +8740,6 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "node-template-runtime" -version = "4.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-balances", - "pallet-grandpa", - "pallet-sudo", - "pallet-template", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "scale-info", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 14.0.0", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "substrate-wasm-builder", -] - [[package]] name = "node-testing" version = "3.0.0-dev" @@ -10302,6 +10219,15 @@ dependencies = [ "sp-version", ] +[[package]] +name = "pallet-minimal-template" +version = "0.0.0" +dependencies = [ + "frame", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "pallet-mixnet" version = "0.4.0" @@ -10603,14 +10529,13 @@ dependencies = [ [[package]] name = "pallet-parachain-template" -version = "0.7.0" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", "scale-info", - "serde", "sp-core", "sp-io", "sp-runtime", @@ -11065,7 +10990,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "4.0.0-dev" +version = "0.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -11075,7 +11000,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -11389,7 +11313,7 @@ dependencies = [ [[package]] name = "parachain-template-node" -version = "0.1.0" +version = "0.0.0" dependencies = [ "clap 4.5.1", "color-print", @@ -11447,7 +11371,7 @@ dependencies = [ [[package]] name = "parachain-template-runtime" -version = "0.7.0" 
+version = "0.0.0" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", @@ -18356,6 +18280,87 @@ dependencies = [ "sha-1 0.9.8", ] +[[package]] +name = "solochain-template-node" +version = "0.0.0" +dependencies = [ + "clap 4.5.1", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-grandpa", + "sc-executor", + "sc-network", + "sc-offchain", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "serde_json", + "solochain-template-runtime", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", + "try-runtime-cli", +] + +[[package]] +name = "solochain-template-runtime" +version = "0.0.0" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-balances", + "pallet-grandpa", + "pallet-sudo", + "pallet-template", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-grandpa", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "sp-api" version = "26.0.0" diff --git a/Cargo.toml b/Cargo.toml index 48aa25f5c5a..a2e22e0b927 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ authors = ["Parity Technologies "] edition = "2021" repository = "https://github.com/paritytech/polkadot-sdk.git" license = "GPL-3.0-only" +homepage = "https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html" [workspace] resolver = "2" @@ -74,9 +75,6 @@ members = [ "cumulus/pallets/solo-to-para", "cumulus/pallets/xcm", "cumulus/pallets/xcmp-queue", - "cumulus/parachain-template/node", - "cumulus/parachain-template/pallets/template", - "cumulus/parachain-template/runtime", "cumulus/parachains/common", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", @@ -219,11 +217,6 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", - "substrate/bin/minimal/node", - "substrate/bin/minimal/runtime", - "substrate/bin/node-template/node", - "substrate/bin/node-template/pallets/template", - "substrate/bin/node-template/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", "substrate/bin/node/inspect", @@ -504,6 +497,18 @@ members = [ "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", "substrate/utils/wasm-builder", + + "templates/minimal/node", + "templates/minimal/pallets/template", + "templates/minimal/runtime", + + "templates/solochain/node", + "templates/solochain/pallets/template", + "templates/solochain/runtime", + + "templates/parachain/node", + "templates/parachain/pallets/template", + 
"templates/parachain/runtime", ] default-members = ["polkadot", "substrate/bin/node/cli"] diff --git a/cumulus/parachain-template/pallets/template/README.md b/cumulus/parachain-template/pallets/template/README.md deleted file mode 100644 index 5a646123346..00000000000 --- a/cumulus/parachain-template/pallets/template/README.md +++ /dev/null @@ -1 +0,0 @@ -License: Unlicense diff --git a/cumulus/parachain-template/pallets/template/src/benchmarking.rs b/cumulus/parachain-template/pallets/template/src/benchmarking.rs deleted file mode 100644 index 8bba2a09867..00000000000 --- a/cumulus/parachain-template/pallets/template/src/benchmarking.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Benchmarking setup for pallet-parachain-template - -use super::*; - -#[allow(unused)] -use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_system::RawOrigin; - -benchmarks! { - do_something { - let s in 0 .. 100; - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), s) - verify { - assert_eq!(Something::::get(), Some(s)); - } -} - -impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/prdoc/pr_3460.prdoc b/prdoc/pr_3460.prdoc new file mode 100644 index 00000000000..1f16fe08908 --- /dev/null +++ b/prdoc/pr_3460.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Repot all templates + +doc: + - audience: Runtime Dev + description: | + This PR moves all templates into a single folder in the polkadot-sdk repo (`/templates`) and + unifies their crate names as well. Most notably, the crate name for what was formerly known + as `node-template` is no `solochain-template-node`. The other two crates in the template are + consequently called: `solochain-runtime-template` and `pallet-solochain-template`. + The other two template crate names follow a similar patter, just replacing `solochain` with + `parachain` or `minimal`. + + This PR is part of a bigger step toward automating the template repositories, see the + following: https://github.com/paritytech/polkadot-sdk/issues/3155 + +# the following crates are removed and renamed, although none are released. +crates: + - name: minimal-template-runtime # formerly called minimal-runtime + - name: minimal-template-node # formerly called minimal-node + - name: solochain-template-node # formerly called node-template + - name: solochain-template-runtime # formerly called node-template-runtime + - name: parachain-template-runtime # formerly called parachain-runtime + - name: parachain-template-runtime # formerly called parachain-node diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml deleted file mode 100644 index 65644861c9a..00000000000 --- a/substrate/bin/minimal/node/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "minimal-node" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition = "2021" -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "minimal-node" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -futures-timer = "3.0.1" -jsonrpsee = { version = "0.22", features = ["server"] } -serde_json = { workspace = true, default-features = true } - -sc-cli = { path = "../../../client/cli" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-offchain = { path = "../../../client/offchain" } -sc-client-api = { path = "../../../client/api" } - -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-api = { path = "../../../primitives/api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-io = { path = "../../../primitives/io" } -sp-runtime = { path = "../../../primitives/runtime" } - -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } - -frame = { path = "../../../frame", features = ["experimental", "runtime"] } -runtime = { package = "minimal-runtime", path = "../runtime" } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml deleted file mode 100644 index 296106544bb..00000000000 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "minimal-runtime" -version = "0.1.0" -authors.workspace = true -description = "A minimal Substrate example runtime" -edition.workspace = true -repository.workspace = true -license.workspace = true -publish = false - -[lints] -workspace = true - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false } -scale-info = { version = "2.6.0", default-features = false } - -# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { path = "../../../frame", default-features = false, features = ["experimental", "runtime"] } -frame-support = { path = "../../../frame/support", default-features = false } - -# pallets that we want to use -pallet-balances = { path = "../../../frame/balances", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# genesis builder that allows us to interacto with runtime genesis config -sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } - - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "frame-support/std", - "frame/std", - "pallet-balances/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "parity-scale-codec/std", - "scale-info/std", - "sp-genesis-builder/std", - "substrate-wasm-builder", -] diff --git a/substrate/bin/node-template/.editorconfig b/substrate/bin/node-template/.editorconfig deleted file mode 100644 index 5adac74ca24..00000000000 --- a/substrate/bin/node-template/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -root = true - -[*] -indent_style=space -indent_size=2 -tab_width=2 -end_of_line=lf -charset=utf-8 -trim_trailing_whitespace=true -insert_final_newline = true - -[*.{rs,toml}] -indent_style=tab -indent_size=tab -tab_width=4 -max_line_length=100 diff --git a/substrate/bin/node-template/env-setup/.envrc b/substrate/bin/node-template/env-setup/.envrc deleted file mode 100644 index 3550a30f2de..00000000000 --- a/substrate/bin/node-template/env-setup/.envrc +++ /dev/null @@ -1 +0,0 @@ -use flake diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml deleted file mode 100644 index 2f31947e492..00000000000 --- a/substrate/bin/node-template/node/Cargo.toml +++ /dev/null @@ -1,93 +0,0 @@ -[package] -name = "node-template" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" -build = "build.rs" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "node-template" - -[dependencies] -clap = { version = "4.5.1", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"] } -serde_json = { workspace = true, default-features = true } - -sc-cli = { path = "../../../client/cli" } -sp-core = { path = "../../../primitives/core" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } -sc-telemetry = { path = "../../../client/telemetry" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-offchain = { path = "../../../client/offchain" } -sc-consensus-aura = { path = "../../../client/consensus/aura" } -sp-consensus-aura = { path = "../../../primitives/consensus/aura" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } -sc-client-api = { path = "../../../client/api" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-io = { path = "../../../primitives/io" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keyring = { path = "../../../primitives/keyring" } -frame-system = { path = "../../../frame/system" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } - -# These dependencies are used for the node template's RPCs -jsonrpsee = { version = "0.22", features = ["server"] } -sp-api = { path = "../../../primitives/api" } -sc-rpc-api = { path = "../../../client/rpc-api" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc" } - -# These dependencies are used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } - -# Local Dependencies -node-template-runtime = { path = "../runtime" } - -# CLI-specific dependencies -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } - -[build-dependencies] -substrate-build-script-utils = { path = "../../../utils/build-script-utils" } - -[features] -default = [] -# Dependencies that are only required if runtime benchmarking should be build. -runtime-benchmarks = [ - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "node-template-runtime/runtime-benchmarks", - "pallet-transaction-payment/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. 
-try-runtime = [ - "frame-system/try-runtime", - "node-template-runtime/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", - "try-runtime-cli/try-runtime", -] diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml deleted file mode 100644 index c7cffa568db..00000000000 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ /dev/null @@ -1,126 +0,0 @@ -[package] -name = "node-template-runtime" -version = "4.0.0-dev" -description = "A fresh FRAME-based Substrate node, ready for hacking." -authors = ["Substrate DevHub "] -homepage = "https://substrate.io/" -edition.workspace = true -license = "MIT-0" -publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } - -pallet-aura = { path = "../../../frame/aura", default-features = false } -pallet-balances = { path = "../../../frame/balances", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false } -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -frame-system = { path = "../../../frame/system", default-features = false } -frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -frame-executive = { path = "../../../frame/executive", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } -sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } -sp-inherents = { path = "../../../primitives/inherents", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-storage = { path = "../../../primitives/storage", default-features = false } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } -sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } -serde_json = { features = ["alloc"], workspace = true } -sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } - -# Used for the node template's RPCs -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = 
"../../../frame/transaction-payment/rpc/runtime-api", default-features = false } - -# Used for runtime benchmarking -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false, optional = true } -frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } - -# Local Dependencies -pallet-template = { path = "../pallets/template", default-features = false } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-grandpa/std", - "pallet-sudo/std", - "pallet-template/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "scale-info/std", - "serde_json/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-grandpa/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-template/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-transaction-payment/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-balances/try-runtime", - "pallet-grandpa/try-runtime", - "pallet-sudo/try-runtime", - "pallet-template/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "sp-runtime/try-runtime", -] -experimental = ["pallet-aura/experimental"] diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 3f148bf4c83..6746723e72f 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -19,8 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps -parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.2.2", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.6.0", default-features = false, features = [ + "derive", +] } # primitive deps, used for developing FRAME pallets. 
sp-runtime = { default-features = false, path = "../primitives/runtime" } @@ -57,8 +61,6 @@ pallet-examples = { path = "./examples" } default = ["runtime", "std"] experimental = ["frame-support/experimental"] runtime = [ - "frame-executive", - "frame-system-rpc-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -68,6 +70,9 @@ runtime = [ "sp-session", "sp-transaction-pool", "sp-version", + + "frame-executive", + "frame-system-rpc-runtime-api", ] std = [ "frame-executive?/std", diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index 69eca055965..224faf9dfd4 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -108,7 +108,7 @@ pub mod pallet { } /// A type providing default configurations for this pallet in another environment. Examples - /// could be a parachain, or a solo-chain. + /// could be a parachain, or a solochain. /// /// Appropriate derive for `frame_system::DefaultConfig` needs to be provided. In this /// example, we simple derive `frame_system::config_preludes::TestDefaultConfig` again. diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 21393cb3e20..0318f77342e 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -310,7 +310,7 @@ pub mod pallet { type PostTransactions = (); } - /// Default configurations of this pallet in a solo-chain environment. + /// Default configurations of this pallet in a solochain environment. /// /// ## Considerations: /// diff --git a/templates/minimal/README.md b/templates/minimal/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml new file mode 100644 index 00000000000..410cfc9f6c3 --- /dev/null +++ b/templates/minimal/node/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "minimal-template-node" +description = "A minimal Substrate-based node, ready for hacking."
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false +build = "build.rs" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +clap = { version = "4.5.1", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +futures-timer = "3.0.1" +jsonrpsee = { version = "0.22", features = ["server"] } +serde_json = { workspace = true, default-features = true } + +sc-cli = { path = "../../../substrate/client/cli" } +sc-executor = { path = "../../../substrate/client/executor" } +sc-network = { path = "../../../substrate/client/network" } +sc-service = { path = "../../../substrate/client/service" } +sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } +sc-consensus = { path = "../../../substrate/client/consensus/common" } +sc-consensus-manual-seal = { path = "../../../substrate/client/consensus/manual-seal" } +sc-rpc-api = { path = "../../../substrate/client/rpc-api" } +sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } +sc-offchain = { path = "../../../substrate/client/offchain" } +sc-client-api = { path = "../../../substrate/client/api" } + +sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sp-block-builder = { path = "../../../substrate/primitives/block-builder" } +sp-io = { path = "../../../substrate/primitives/io" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } + +substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } + +# Once the native runtime is gone, there should be little to no dependency on FRAME here, and +# certainly no dependency on the runtime. 
+frame = { path = "../../../substrate/frame", features = [ + "experimental", + "runtime", +] } +runtime = { package = "minimal-template-runtime", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } + +[features] +default = [] diff --git a/substrate/bin/minimal/node/build.rs b/templates/minimal/node/build.rs similarity index 100% rename from substrate/bin/minimal/node/build.rs rename to templates/minimal/node/build.rs diff --git a/substrate/bin/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs similarity index 100% rename from substrate/bin/minimal/node/src/chain_spec.rs rename to templates/minimal/node/src/chain_spec.rs diff --git a/substrate/bin/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs similarity index 100% rename from substrate/bin/minimal/node/src/cli.rs rename to templates/minimal/node/src/cli.rs diff --git a/substrate/bin/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs similarity index 100% rename from substrate/bin/minimal/node/src/command.rs rename to templates/minimal/node/src/command.rs diff --git a/substrate/bin/minimal/node/src/lib.rs b/templates/minimal/node/src/lib.rs similarity index 94% rename from substrate/bin/minimal/node/src/lib.rs rename to templates/minimal/node/src/lib.rs index c2065def736..cb8ed3bd209 100644 --- a/substrate/bin/minimal/node/src/lib.rs +++ b/templates/minimal/node/src/lib.rs @@ -1,4 +1,4 @@ -// This file is part of Substrate. +// This file is part of Polkadot Sdk. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 diff --git a/substrate/bin/minimal/node/src/main.rs b/templates/minimal/node/src/main.rs similarity index 100% rename from substrate/bin/minimal/node/src/main.rs rename to templates/minimal/node/src/main.rs diff --git a/substrate/bin/minimal/node/src/rpc.rs b/templates/minimal/node/src/rpc.rs similarity index 100% rename from substrate/bin/minimal/node/src/rpc.rs rename to templates/minimal/node/src/rpc.rs diff --git a/substrate/bin/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs similarity index 100% rename from substrate/bin/minimal/node/src/service.rs rename to templates/minimal/node/src/service.rs diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml new file mode 100644 index 00000000000..9982e5ea53b --- /dev/null +++ b/templates/minimal/pallets/template/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-minimal-template" +description = "A minimal pallet built with FRAME, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ + "derive", +], default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +frame = { path = "../../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + + +[features] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs new file mode 100644 index 00000000000..713f014bbe6 --- /dev/null +++ b/templates/minimal/pallets/template/src/lib.rs @@ -0,0 +1,19 @@ +//! A shell pallet built with [`frame`]. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame::prelude::*; + +// Re-export all pallet parts, this is needed to properly import the pallet into the runtime. +pub use pallet::*; + +#[frame::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml new file mode 100644 index 00000000000..20ffb706eb4 --- /dev/null +++ b/templates/minimal/runtime/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "minimal-template-runtime" +description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
+frame = { path = "../../../substrate/frame", default-features = false, features = [ + "experimental", + "runtime", +] } + +# pallets that we want to use +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# genesis builder that allows us to interacto with runtime genesis config +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } + +# local pallet templates +pallet-minimal-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "scale-info/std", + + "frame/std", + + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "pallet-minimal-template/std", + + "sp-genesis-builder/std", + "substrate-wasm-builder", +] diff --git a/substrate/bin/minimal/runtime/build.rs b/templates/minimal/runtime/build.rs similarity index 100% rename from substrate/bin/minimal/runtime/build.rs rename to templates/minimal/runtime/build.rs diff --git a/substrate/bin/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs similarity index 95% rename from substrate/bin/minimal/runtime/src/lib.rs rename to templates/minimal/runtime/src/lib.rs index d3e8a2e8ec0..6920511e276 100644 --- a/substrate/bin/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -22,7 +22,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use frame::{ - deps::frame_support::weights::{FixedFee, NoFee}, + deps::frame_support::{ + genesis_builder_helper::{build_config, create_default_config}, + weights::{FixedFee, NoFee}, + }, prelude::*, runtime::{ apis::{ @@ -32,12 +35,11 @@ use frame::{ prelude::*, }, }; -use frame_support::genesis_builder_helper::{build_config, create_default_config}; #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("minimal-runtime"), - impl_name: create_runtime_str!("minimal-runtime"), + spec_name: create_runtime_str!("minimal-template-runtime"), + impl_name: create_runtime_str!("minimal-template-runtime"), authoring_version: 1, spec_version: 0, impl_version: 1, @@ -71,6 +73,9 @@ construct_runtime!( Balances: pallet_balances, Sudo: pallet_sudo, TransactionPayment: pallet_transaction_payment, + + // our local pallet + Template: pallet_minimal_template, } ); @@ -104,6 +109,8 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = FixedFee<1, ::Balance>; } +impl pallet_minimal_template::Config for Runtime {} + type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; diff --git a/cumulus/parachain-template/LICENSE b/templates/parachain/LICENSE similarity index 100% rename from cumulus/parachain-template/LICENSE rename to templates/parachain/LICENSE diff --git a/cumulus/parachain-template/README.md b/templates/parachain/README.md similarity index 100% rename from 
cumulus/parachain-template/README.md rename to templates/parachain/README.md diff --git a/cumulus/parachain-template/node/Cargo.toml b/templates/parachain/node/Cargo.toml similarity index 78% rename from cumulus/parachain-template/node/Cargo.toml rename to templates/parachain/node/Cargo.toml index 0ef678c4cba..b4f3a504a4d 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -1,18 +1,21 @@ [package] name = "parachain-template-node" -version = "0.1.0" -authors = ["Anonymous"] -description = "A new Cumulus FRAME-based Substrate Node, ready for hacking together a parachain." -license = "Unlicense" -homepage = "https://substrate.io" +description = "A parachain node template built with Substrate and Cumulus, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true -build = "build.rs" publish = false +build = "build.rs" [lints] workspace = true +# [[bin]] +# name = "parachain-template-node" + [dependencies] clap = { version = "4.5.1", features = ["derive"] } log = { workspace = true, default-features = true } @@ -63,15 +66,15 @@ polkadot-primitives = { path = "../../../polkadot/primitives" } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus -cumulus-client-cli = { path = "../../client/cli" } -cumulus-client-collator = { path = "../../client/collator" } -cumulus-client-consensus-aura = { path = "../../client/consensus/aura" } -cumulus-client-consensus-common = { path = "../../client/consensus/common" } -cumulus-client-consensus-proposer = { path = "../../client/consensus/proposer" } -cumulus-client-service = { path = "../../client/service" } -cumulus-primitives-core = { path = "../../primitives/core" } -cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } -cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface" } +cumulus-client-cli = { path = "../../../cumulus/client/cli" } +cumulus-client-collator = { path = "../../../cumulus/client/collator" } +cumulus-client-consensus-aura = { path = "../../../cumulus/client/consensus/aura" } +cumulus-client-consensus-common = { path = "../../../cumulus/client/consensus/common" } +cumulus-client-consensus-proposer = { path = "../../../cumulus/client/consensus/proposer" } +cumulus-client-service = { path = "../../../cumulus/client/service" } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core" } +cumulus-primitives-parachain-inherent = { path = "../../../cumulus/primitives/parachain-inherent" } +cumulus-relay-chain-interface = { path = "../../../cumulus/client/relay-chain-interface" } color-print = "0.3.4" [build-dependencies] diff --git a/cumulus/parachain-template/node/build.rs b/templates/parachain/node/build.rs similarity index 100% rename from cumulus/parachain-template/node/build.rs rename to templates/parachain/node/build.rs diff --git a/cumulus/parachain-template/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs similarity index 93% rename from cumulus/parachain-template/node/src/chain_spec.rs rename to templates/parachain/node/src/chain_spec.rs index a79c78699c0..16c91865cdb 100644 --- a/cumulus/parachain-template/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -1,5 +1,6 @@ use cumulus_primitives_core::ParaId; -use parachain_template_runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; +use parachain_template_runtime 
as runtime; +use runtime::{AccountId, AuraId, Signature, EXISTENTIAL_DEPOSIT}; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; @@ -56,8 +57,8 @@ where /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn template_session_keys(keys: AuraId) -> parachain_template_runtime::SessionKeys { - parachain_template_runtime::SessionKeys { aura: keys } +pub fn template_session_keys(keys: AuraId) -> runtime::SessionKeys { + runtime::SessionKeys { aura: keys } } pub fn development_config() -> ChainSpec { @@ -68,8 +69,7 @@ pub fn development_config() -> ChainSpec { properties.insert("ss58Format".into(), 42.into()); ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! @@ -120,8 +120,7 @@ pub fn local_testnet_config() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( - parachain_template_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), + runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! diff --git a/cumulus/parachain-template/node/src/cli.rs b/templates/parachain/node/src/cli.rs similarity index 100% rename from cumulus/parachain-template/node/src/cli.rs rename to templates/parachain/node/src/cli.rs diff --git a/cumulus/parachain-template/node/src/command.rs b/templates/parachain/node/src/command.rs similarity index 100% rename from cumulus/parachain-template/node/src/command.rs rename to templates/parachain/node/src/command.rs diff --git a/cumulus/parachain-template/node/src/main.rs b/templates/parachain/node/src/main.rs similarity index 100% rename from cumulus/parachain-template/node/src/main.rs rename to templates/parachain/node/src/main.rs diff --git a/cumulus/parachain-template/node/src/rpc.rs b/templates/parachain/node/src/rpc.rs similarity index 100% rename from cumulus/parachain-template/node/src/rpc.rs rename to templates/parachain/node/src/rpc.rs diff --git a/cumulus/parachain-template/node/src/service.rs b/templates/parachain/node/src/service.rs similarity index 100% rename from cumulus/parachain-template/node/src/service.rs rename to templates/parachain/node/src/service.rs diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml similarity index 68% rename from cumulus/parachain-template/pallets/template/Cargo.toml rename to templates/parachain/pallets/template/Cargo.toml index 04858a161fa..89eb9d51716 100644 --- a/cumulus/parachain-template/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-parachain-template" -authors = ["Anonymous"] description = "FRAME pallet template for defining custom runtime logic." 
-version = "0.7.0" -license = "Unlicense" -homepage = "https://substrate.io" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -15,21 +16,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } -# Substrate +# frame deps frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -serde = { workspace = true, default-features = true } - -# Substrate -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } -sp-io = { path = "../../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +43,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/substrate/bin/node-template/pallets/template/README.md b/templates/parachain/pallets/template/README.md similarity index 100% rename from substrate/bin/node-template/pallets/template/README.md rename to templates/parachain/pallets/template/README.md diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/benchmarking.rs rename to templates/parachain/pallets/template/src/benchmarking.rs diff --git a/cumulus/parachain-template/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs similarity index 96% rename from cumulus/parachain-template/pallets/template/src/lib.rs rename to templates/parachain/pallets/template/src/lib.rs index 24226d6cf40..11587d1df42 100644 --- a/cumulus/parachain-template/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -11,6 +11,8 @@ mod mock; #[cfg(test)] mod tests; +pub mod weights; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -24,6 +26,8 @@ pub mod pallet { pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// A type representing the weights required by the dispatchables of this pallet. 
+ type WeightInfo: crate::weights::WeightInfo; } #[pallet::pallet] diff --git a/cumulus/parachain-template/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs similarity index 98% rename from cumulus/parachain-template/pallets/template/src/mock.rs rename to templates/parachain/pallets/template/src/mock.rs index 411a16b116c..f510a8b773a 100644 --- a/cumulus/parachain-template/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -51,6 +51,7 @@ impl system::Config for Test { impl crate::Config for Test { type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); } // Build genesis storage according to the mock runtime. diff --git a/cumulus/parachain-template/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs similarity index 100% rename from cumulus/parachain-template/pallets/template/src/tests.rs rename to templates/parachain/pallets/template/src/tests.rs diff --git a/substrate/bin/node-template/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/weights.rs rename to templates/parachain/pallets/template/src/weights.rs diff --git a/cumulus/parachain-template/polkadot-launch/config.json b/templates/parachain/polkadot-launch/config.json similarity index 100% rename from cumulus/parachain-template/polkadot-launch/config.json rename to templates/parachain/polkadot-launch/config.json diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml similarity index 82% rename from cumulus/parachain-template/runtime/Cargo.toml rename to templates/parachain/runtime/Cargo.toml index 60d8d6f5136..44e9341b568 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "parachain-template-runtime" -version = "0.7.0" -authors = ["Anonymous"] -description = "A new Cumulus FRAME-based Substrate Runtime, ready for hacking together a parachain." -license = "Unlicense" -homepage = "https://substrate.io" +description = "A parachain runtime template built with Substrate and Cumulus, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true repository.workspace = true edition.workspace = true +publish = false [lints] workspace = true @@ -18,16 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } smallvec = "1.11.0" # Local pallet-parachain-template = { path = "../pallets/template", default-features = false } -# Substrate +# Substrate / FRAME frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } @@ -35,6 +40,8 @@ frame-system = { path = "../../../substrate/frame/system", default-features = fa frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } + +# FRAME Pallets pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } @@ -44,6 +51,8 @@ pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Substrate Primitives sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } @@ -66,17 +75,19 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } -cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false } -cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } 
-cumulus-primitives-core = { path = "../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } -cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } -pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } -parachains-common = { path = "../../parachains/common", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } +cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } +cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } +pallet-collator-selection = { path = "../../../cumulus/pallets/collator-selection", default-features = false } +parachains-common = { path = "../../../cumulus/parachains/common", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../cumulus/parachains/pallets/parachain-info", default-features = false } [features] default = ["std"] diff --git a/cumulus/parachain-template/runtime/build.rs b/templates/parachain/runtime/build.rs similarity index 100% rename from cumulus/parachain-template/runtime/build.rs rename to templates/parachain/runtime/build.rs diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs similarity index 99% rename from cumulus/parachain-template/runtime/src/lib.rs rename to templates/parachain/runtime/src/lib.rs index f7754e594e6..74ecd751f67 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -180,8 +180,8 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("template-parachain"), - impl_name: create_runtime_str!("template-parachain"), + spec_name: create_runtime_str!("parachain-template-runtime"), + impl_name: create_runtime_str!("parachain-template-runtime"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -490,6 +490,7 @@ impl pallet_collator_selection::Config for Runtime { /// Configure the pallet template in pallets/template. impl pallet_parachain_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_parachain_template::weights::SubstrateWeight<Runtime>; } // Create the runtime by composing the FRAME pallets that were previously configured.
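The hunk above wires the template pallet's new `WeightInfo` associated type to `pallet_parachain_template::weights::SubstrateWeight<Runtime>` in the runtime, while the mock uses `()`. For orientation, a minimal sketch of the shape such a `weights.rs` interface usually takes follows; the `do_something` function and the weight values are illustrative assumptions, not the file added by this patch.

use frame_support::weights::Weight;

/// Weight functions the pallet's dispatchables reference via `T::WeightInfo`.
pub trait WeightInfo {
    fn do_something() -> Weight;
}

/// Stand-in for benchmark-generated weights (normally emitted into `weights.rs`).
pub struct SubstrateWeight<T>(core::marker::PhantomData<T>);

impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    fn do_something() -> Weight {
        // Arbitrary ref_time/proof_size values, for illustration only.
        Weight::from_parts(10_000, 0)
    }
}

/// The unit type keeps tests cheap, matching `type WeightInfo = ();` in the mock.
impl WeightInfo for () {
    fn do_something() -> Weight {
        Weight::zero()
    }
}

Dispatchables then annotate themselves with `#[pallet::weight(T::WeightInfo::do_something())]`, which is how the associated type configured in the runtime feeds back into extrinsic weighing.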
diff --git a/cumulus/parachain-template/runtime/src/weights/block_weights.rs b/templates/parachain/runtime/src/weights/block_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/block_weights.rs rename to templates/parachain/runtime/src/weights/block_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs b/templates/parachain/runtime/src/weights/extrinsic_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/extrinsic_weights.rs rename to templates/parachain/runtime/src/weights/extrinsic_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/mod.rs b/templates/parachain/runtime/src/weights/mod.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/mod.rs rename to templates/parachain/runtime/src/weights/mod.rs diff --git a/cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs b/templates/parachain/runtime/src/weights/paritydb_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/paritydb_weights.rs rename to templates/parachain/runtime/src/weights/paritydb_weights.rs diff --git a/cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs b/templates/parachain/runtime/src/weights/rocksdb_weights.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/weights/rocksdb_weights.rs rename to templates/parachain/runtime/src/weights/rocksdb_weights.rs diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/templates/parachain/runtime/src/xcm_config.rs similarity index 100% rename from cumulus/parachain-template/runtime/src/xcm_config.rs rename to templates/parachain/runtime/src/xcm_config.rs diff --git a/substrate/bin/node-template/LICENSE b/templates/solochain/LICENSE similarity index 100% rename from substrate/bin/node-template/LICENSE rename to templates/solochain/LICENSE diff --git a/substrate/bin/node-template/README.md b/templates/solochain/README.md similarity index 100% rename from substrate/bin/node-template/README.md rename to templates/solochain/README.md diff --git a/substrate/bin/node-template/docs/rust-setup.md b/templates/solochain/docs/rust-setup.md similarity index 100% rename from substrate/bin/node-template/docs/rust-setup.md rename to templates/solochain/docs/rust-setup.md diff --git a/substrate/bin/node-template/env-setup/README.md b/templates/solochain/env-setup/README.md similarity index 100% rename from substrate/bin/node-template/env-setup/README.md rename to templates/solochain/env-setup/README.md diff --git a/substrate/bin/node-template/env-setup/flake.lock b/templates/solochain/env-setup/flake.lock similarity index 100% rename from substrate/bin/node-template/env-setup/flake.lock rename to templates/solochain/env-setup/flake.lock diff --git a/substrate/bin/node-template/env-setup/flake.nix b/templates/solochain/env-setup/flake.nix similarity index 100% rename from substrate/bin/node-template/env-setup/flake.nix rename to templates/solochain/env-setup/flake.nix diff --git a/substrate/bin/node-template/env-setup/rust-toolchain.toml b/templates/solochain/env-setup/rust-toolchain.toml similarity index 100% rename from substrate/bin/node-template/env-setup/rust-toolchain.toml rename to templates/solochain/env-setup/rust-toolchain.toml diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml new file mode 100644 index 00000000000..5e50e6106c1 --- /dev/null +++ b/templates/solochain/node/Cargo.toml @@ 
-0,0 +1,92 @@ +[package] +name = "solochain-template-node" +description = "A solochain node template built with Substrate, part of Polkadot Sdk." +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +build = "build.rs" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +clap = { version = "4.5.1", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"] } +serde_json = { workspace = true, default-features = true } +jsonrpsee = { version = "0.22", features = ["server"] } + +# substrate client +sc-cli = { path = "../../../substrate/client/cli" } +sp-core = { path = "../../../substrate/primitives/core" } +sc-executor = { path = "../../../substrate/client/executor" } +sc-network = { path = "../../../substrate/client/network" } +sc-service = { path = "../../../substrate/client/service" } +sc-telemetry = { path = "../../../substrate/client/telemetry" } +sc-transaction-pool = { path = "../../../substrate/client/transaction-pool" } +sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } +sc-offchain = { path = "../../../substrate/client/offchain" } +sc-consensus-aura = { path = "../../../substrate/client/consensus/aura" } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } +sc-consensus = { path = "../../../substrate/client/consensus/common" } +sc-consensus-grandpa = { path = "../../../substrate/client/consensus/grandpa" } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa" } +sc-client-api = { path = "../../../substrate/client/api" } +sc-rpc-api = { path = "../../../substrate/client/rpc-api" } +sc-basic-authorship = { path = "../../../substrate/client/basic-authorship" } + +# substrate primitives +sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-io = { path = "../../../substrate/primitives/io" } +sp-timestamp = { path = "../../../substrate/primitives/timestamp" } +sp-inherents = { path = "../../../substrate/primitives/inherents" } +sp-keyring = { path = "../../../substrate/primitives/keyring" } +sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +sp-block-builder = { path = "../../../substrate/primitives/block-builder" } + +# frame and pallets +frame-system = { path = "../../../substrate/frame/system" } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc = { path = "../../../substrate/frame/transaction-payment/rpc" } +substrate-frame-rpc-system = { path = "../../../substrate/utils/frame/rpc/system" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking-cli = { path = "../../../substrate/utils/frame/benchmarking-cli" } + +# Local Dependencies +solochain-template-runtime = { path = "../runtime" } + +# CLI-specific dependencies +try-runtime-cli = { path = "../../../substrate/utils/frame/try-runtime/cli", optional = true } + +[build-dependencies] +substrate-build-script-utils = { path = "../../../substrate/utils/build-script-utils" } + +[features] +default = [] +# Dependencies that are only required if runtime benchmarking should be build. 
+runtime-benchmarks = [ + "frame-benchmarking-cli/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sc-service/runtime-benchmarks", + "solochain-template-runtime/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +# Enable features that allow the runtime to be tried and debugged. Name might be subject to change +# in the near future. +try-runtime = [ + "frame-system/try-runtime", + "pallet-transaction-payment/try-runtime", + "solochain-template-runtime/try-runtime", + "sp-runtime/try-runtime", + "try-runtime-cli/try-runtime", +] diff --git a/substrate/bin/node-template/node/build.rs b/templates/solochain/node/build.rs similarity index 100% rename from substrate/bin/node-template/node/build.rs rename to templates/solochain/node/build.rs diff --git a/substrate/bin/node-template/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs similarity index 99% rename from substrate/bin/node-template/node/src/benchmarking.rs rename to templates/solochain/node/src/benchmarking.rs index eacc367669c..8710f303e1f 100644 --- a/substrate/bin/node-template/node/src/benchmarking.rs +++ b/templates/solochain/node/src/benchmarking.rs @@ -4,10 +4,10 @@ use crate::service::FullClient; -use node_template_runtime as runtime; use runtime::{AccountId, Balance, BalancesCall, SystemCall}; use sc_cli::Result; use sc_client_api::BlockBackend; +use solochain_template_runtime as runtime; use sp_core::{Encode, Pair}; use sp_inherents::{InherentData, InherentDataProvider}; use sp_keyring::Sr25519Keyring; diff --git a/substrate/bin/node-template/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs similarity index 97% rename from substrate/bin/node-template/node/src/chain_spec.rs rename to templates/solochain/node/src/chain_spec.rs index 6e0d78f647a..be49f2c1fc7 100644 --- a/substrate/bin/node-template/node/src/chain_spec.rs +++ b/templates/solochain/node/src/chain_spec.rs @@ -1,5 +1,5 @@ -use node_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; use sc_service::ChainType; +use solochain_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; diff --git a/substrate/bin/node-template/node/src/cli.rs b/templates/solochain/node/src/cli.rs similarity index 100% rename from substrate/bin/node-template/node/src/cli.rs rename to templates/solochain/node/src/cli.rs diff --git a/substrate/bin/node-template/node/src/command.rs b/templates/solochain/node/src/command.rs similarity index 98% rename from substrate/bin/node-template/node/src/command.rs rename to templates/solochain/node/src/command.rs index 3778df66422..42d1477f22f 100644 --- a/substrate/bin/node-template/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -5,9 +5,9 @@ use crate::{ service, }; use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; -use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sc_cli::SubstrateCli; use sc_service::PartialComponents; +use solochain_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sp_keyring::Sr25519Keyring; impl SubstrateCli for Cli { diff --git a/substrate/bin/node-template/node/src/main.rs b/templates/solochain/node/src/main.rs similarity index 100% rename from substrate/bin/node-template/node/src/main.rs rename to templates/solochain/node/src/main.rs diff --git 
a/substrate/bin/node-template/node/src/rpc.rs b/templates/solochain/node/src/rpc.rs similarity index 96% rename from substrate/bin/node-template/node/src/rpc.rs rename to templates/solochain/node/src/rpc.rs index 246391adcbb..fe2b6ca72ed 100644 --- a/substrate/bin/node-template/node/src/rpc.rs +++ b/templates/solochain/node/src/rpc.rs @@ -8,8 +8,8 @@ use std::sync::Arc; use jsonrpsee::RpcModule; -use node_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; use sc_transaction_pool_api::TransactionPool; +use solochain_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; diff --git a/substrate/bin/node-template/node/src/service.rs b/templates/solochain/node/src/service.rs similarity index 99% rename from substrate/bin/node-template/node/src/service.rs rename to templates/solochain/node/src/service.rs index 125cca13927..dc25f757912 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -1,13 +1,13 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. use futures::FutureExt; -use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use solochain_template_runtime::{self, opaque::Block, RuntimeApi}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; diff --git a/substrate/bin/node-template/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml similarity index 55% rename from substrate/bin/node-template/pallets/template/Cargo.toml rename to templates/solochain/pallets/template/Cargo.toml index 51410a71c7b..bd234715198 100644 --- a/substrate/bin/node-template/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "pallet-template" -version = "4.0.0-dev" description = "FRAME pallet template for defining custom runtime logic." 
-authors = ["Substrate DevHub "] -homepage = "https://substrate.io" -edition.workspace = true +version = "0.0.0" license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true publish = false -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [lints] workspace = true @@ -19,16 +19,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true } -frame-support = { path = "../../../../frame/support", default-features = false } -frame-system = { path = "../../../../frame/system", default-features = false } -sp-std = { path = "../../../../primitives/std", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } + +# frame deps +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] -sp-core = { path = "../../../../primitives/core" } -sp-io = { path = "../../../../primitives/io" } -sp-runtime = { path = "../../../../primitives/runtime" } +sp-core = { path = "../../../../substrate/primitives/core" } +sp-io = { path = "../../../../substrate/primitives/io" } +sp-runtime = { path = "../../../../substrate/primitives/runtime" } [features] default = ["std"] @@ -41,7 +44,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/templates/solochain/pallets/template/README.md b/templates/solochain/pallets/template/README.md new file mode 100644 index 00000000000..9e4dc55267d --- /dev/null +++ b/templates/solochain/pallets/template/README.md @@ -0,0 +1 @@ +License: MIT-0 diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs new file mode 100644 index 00000000000..5a262417629 --- /dev/null +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -0,0 +1,35 @@ +//! 
Benchmarking setup for pallet-template +#![cfg(feature = "runtime-benchmarks")] +use super::*; + +#[allow(unused)] +use crate::Pallet as Template; +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn do_something() { + let value = 100u32.into(); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + do_something(RawOrigin::Signed(caller), value); + + assert_eq!(Something::::get(), Some(value)); + } + + #[benchmark] + fn cause_error() { + Something::::put(100u32); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + cause_error(RawOrigin::Signed(caller)); + + assert_eq!(Something::::get(), Some(101u32)); + } + + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/bin/node-template/pallets/template/src/lib.rs b/templates/solochain/pallets/template/src/lib.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/lib.rs rename to templates/solochain/pallets/template/src/lib.rs diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/mock.rs rename to templates/solochain/pallets/template/src/mock.rs diff --git a/substrate/bin/node-template/pallets/template/src/tests.rs b/templates/solochain/pallets/template/src/tests.rs similarity index 100% rename from substrate/bin/node-template/pallets/template/src/tests.rs rename to templates/solochain/pallets/template/src/tests.rs diff --git a/templates/solochain/pallets/template/src/weights.rs b/templates/solochain/pallets/template/src/weights.rs new file mode 100644 index 00000000000..7c42936e09f --- /dev/null +++ b/templates/solochain/pallets/template/src/weights.rs @@ -0,0 +1,90 @@ + +//! Autogenerated weights for pallet_template +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ../../target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_template +// --extrinsic +// * +// --steps=50 +// --repeat=20 +// --wasm-execution=compiled +// --output +// pallets/template/src/weights.rs +// --template +// ../../.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for pallet_template. +pub trait WeightInfo { + fn do_something() -> Weight; + fn cause_error() -> Weight; +} + +/// Weights for pallet_template using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + /// Storage: TemplateModule Something (r:0 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn do_something() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: TemplateModule Something (r:1 w:1) + /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn cause_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1489` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml new file mode 100644 index 00000000000..4f22e7ff6a3 --- /dev/null +++ b/templates/solochain/runtime/Cargo.toml @@ -0,0 +1,152 @@ +[package] +name = "solochain-template-runtime" +description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." 
+version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", +] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", + "serde", +] } + +# frame +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } + +# frame pallets +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } +pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } + +# primitives +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false, features = [ + "serde", +] } +sp-consensus-grandpa = { path = "../../../substrate/primitives/consensus/grandpa", default-features = false, features = [ + "serde", +] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = [ + "serde", +] } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = [ + "serde", +] } +sp-session = { path = "../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false, features = [ + "serde", +] } +sp-genesis-builder = { default-features = false, path = "../../../substrate/primitives/genesis-builder" } + +# RPC related +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } + +# Used for runtime benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } + +# The pallet in this 
template. +pallet-template = { path = "../pallets/template", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + + "frame-benchmarking?/std", + "frame-try-runtime?/std", + + "pallet-aura/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-sudo/std", + "pallet-template/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-consensus-grandpa/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + + "substrate-wasm-builder", +] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-template/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-balances/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-sudo/try-runtime", + "pallet-template/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node-template/runtime/build.rs b/templates/solochain/runtime/build.rs similarity index 100% rename from substrate/bin/node-template/runtime/build.rs rename to templates/solochain/runtime/build.rs diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs similarity index 98% rename from substrate/bin/node-template/runtime/src/lib.rs rename to templates/solochain/runtime/src/lib.rs index ee622a691b4..409b639f021 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -1,8 +1,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -22,7 +19,6 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use frame_support::genesis_builder_helper::{build_config, create_default_config}; -// A few exports that help ease life for downstream crates. 
pub use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ @@ -95,8 +91,8 @@ pub mod opaque { // https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("node-template"), - impl_name: create_runtime_str!("node-template"), + spec_name: create_runtime_str!("solochain-template-runtime"), + impl_name: create_runtime_str!("solochain-template-runtime"), authoring_version: 1, // The version of the runtime specification. A full node will not attempt to use its native // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, @@ -266,6 +262,7 @@ construct_runtime!( Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, + // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, } -- GitLab From c367ac2488db23a68e1cdf446cece58c66ea93d5 Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:05:04 +0000 Subject: [PATCH 45/86] remove deprecated type 'GenesisConfig' (#3378) # Description Removed deprecated type `GenesisConfig` from the codebase. Closes https://github.com/paritytech/polkadot-sdk/issues/175 # Checklist - [x] My PR includes a detailed description as outlined in the "Description" section above - [x] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) - [x] I have made corresponding changes to the documentation (if applicable) --------- Co-authored-by: Liam Aharon Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- prdoc/pr_3378.prdoc | 13 +++++++++ .../chain-spec/src/genesis_config_builder.rs | 24 ++++++++------- .../src/construct_runtime/expand/config.rs | 4 --- .../deprecated_where_block.stderr | 2 +- .../primitives/genesis-builder/Cargo.toml | 2 +- .../primitives/genesis-builder/src/lib.rs | 29 ++++++++++--------- 6 files changed, 43 insertions(+), 31 deletions(-) create mode 100644 prdoc/pr_3378.prdoc diff --git a/prdoc/pr_3378.prdoc b/prdoc/pr_3378.prdoc new file mode 100644 index 00000000000..2470fc15851 --- /dev/null +++ b/prdoc/pr_3378.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated GenesisConfig + +doc: + - audience: Runtime Dev + description: | + Removes deprecated type `GenesisConfig`, it was replaced by `RuntimeGenesisConfig` on May 24 of 2023. + The type `GenesisConfig` was deprecated on May 24 of 2023 [#14210](https://github.com/paritytech/substrate/pull/14210) + +crates: + - name: frame-support-procedural \ No newline at end of file diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 6b956316203..c8b54f66be6 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -81,7 +81,8 @@ where .0 } - /// Returns the default `GenesisConfig` provided by the `runtime`. + /// Returns a json representation of the default `RuntimeGenesisConfig` provided by the + /// `runtime`. /// /// Calls [`GenesisBuilder::create_default_config`](sp_genesis_builder::GenesisBuilder::create_default_config) in the `runtime`. 
pub fn get_default_config(&self) -> core::result::Result<Value, String> { @@ -94,7 +95,7 @@ where Ok(from_slice(&default_config[..]).expect("returned value is json. qed.")) } - /// Build the given `GenesisConfig` and returns the genesis state. + /// Builds `RuntimeGenesisConfig` from given json blob and returns the genesis state. /// /// Calls [`GenesisBuilder::build_config`](sp_genesis_builder::GenesisBuilder::build_config) /// provided by the `runtime`. @@ -111,25 +112,26 @@ where Ok(ext.into_storages()) } - /// Creates the genesis state by patching the default `GenesisConfig` and applying it. + /// Creates the genesis state by patching the default `RuntimeGenesisConfig`. /// - /// This function generates the `GenesisConfig` for the runtime by applying a provided JSON - /// patch. The patch modifies the default `GenesisConfig` allowing customization of the specific - /// keys. The resulting `GenesisConfig` is then deserialized from the patched JSON - /// representation and stored in the storage. + /// This function generates the `RuntimeGenesisConfig` for the runtime by applying a provided + /// JSON patch. The patch modifies the default `RuntimeGenesisConfig` allowing customization of + /// the specific keys. The resulting `RuntimeGenesisConfig` is then deserialized from the + /// patched JSON representation and stored in the storage. /// /// If the provided JSON patch is incorrect or the deserialization fails the error will be /// returned. /// - /// The patching process modifies the default `GenesisConfig` according to the followingΒ rules: + /// The patching process modifies the default `RuntimeGenesisConfig` according to the following + /// rules: /// 1. Existing keys in the default configuration will be overridden by the corresponding values /// in the patch. /// 2. If a key exists in the patch but not in the default configuration, it will be added to - /// the resulting `GenesisConfig`. /// 3. Keys in the default configuration that have null values in the patch will be removed from - /// the resulting `GenesisConfig`. This is helpful for changing enum variant value. + /// the resulting `RuntimeGenesisConfig`. This is helpful for changing enum variant value. /// - /// Please note that the patch may contain full `GenesisConfig`. + /// Please note that the patch may contain full `RuntimeGenesisConfig`. pub fn get_storage_for_patch(&self, patch: Value) -> core::result::Result<Storage, String> { let mut config = self.get_default_config()?; crate::json_patch::merge(&mut config, patch); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index 5613047359a..dbbe6ba6e6c 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -76,10 +76,6 @@ pub fn expand_outer_config( #fields } - #[cfg(any(feature = "std", test))] - #[deprecated(note = "GenesisConfig is planned to be removed in December 2023. 
Use `RuntimeGenesisConfig` instead.")] - pub type GenesisConfig = RuntimeGenesisConfig; - #[cfg(any(feature = "std", test))] impl #scrate::sp_runtime::BuildStorage for RuntimeGenesisConfig { fn assimilate_storage( diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 9dd460da75a..c677ddcd672 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -347,7 +347,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied 26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, | ^^^^^^ the trait `Config` is not implemented for `Runtime` | -note: required by a bound in `frame_system::GenesisConfig` +note: required by a bound in `GenesisConfig` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub struct GenesisConfig { diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index bf12433c5f4..15440b4811e 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true -description = "Substrate GenesisConfig builder API" +description = "Substrate RuntimeGenesisConfig builder API" readme = "README.md" [lints] diff --git a/substrate/primitives/genesis-builder/src/lib.rs b/substrate/primitives/genesis-builder/src/lib.rs index e002cd3aa6f..bb1a2a35248 100644 --- a/substrate/primitives/genesis-builder/src/lib.rs +++ b/substrate/primitives/genesis-builder/src/lib.rs @@ -19,36 +19,37 @@ //! Substrate genesis config builder //! -//! This Runtime API allows to construct `GenesisConfig`, in particular: -//! - serialize the runtime default `GenesisConfig` struct into json format, -//! - put the GenesisConfig struct into the storage. Internally this operation calls +//! This Runtime API allows to construct `RuntimeGenesisConfig`, in particular: +//! - serialize the runtime default `RuntimeGenesisConfig` struct into json format, +//! - put the RuntimeGenesisConfig struct into the storage. Internally this operation calls //! `GenesisBuild::build` function for all runtime pallets, which is typically provided by //! pallet's author. -//! - deserialize the `GenesisConfig` from given json blob and put `GenesisConfig` into the state -//! storage. Allows to build customized configuration. +//! - deserialize the `RuntimeGenesisConfig` from given json blob and put `RuntimeGenesisConfig` +//! into the state storage. Allows to build customized configuration. //! -//! Providing externalities with empty storage and putting `GenesisConfig` into storage allows to -//! catch and build the raw storage of `GenesisConfig` which is the foundation for genesis block. +//! Providing externalities with empty storage and putting `RuntimeGenesisConfig` into storage +//! allows to catch and build the raw storage of `RuntimeGenesisConfig` which is the foundation for +//! genesis block. /// The result type alias, used in build methods. `Err` contains formatted error message. pub type Result = core::result::Result<(), sp_runtime::RuntimeString>; sp_api::decl_runtime_apis! 
{ - /// API to interact with GenesisConfig for the runtime + /// API to interact with RuntimeGenesisConfig for the runtime pub trait GenesisBuilder { - /// Creates the default `GenesisConfig` and returns it as a JSON blob. + /// Creates the default `RuntimeGenesisConfig` and returns it as a JSON blob. /// - /// This function instantiates the default `GenesisConfig` struct for the runtime and serializes it into a JSON - /// blob. It returns a `Vec` containing the JSON representation of the default `GenesisConfig`. + /// This function instantiates the default `RuntimeGenesisConfig` struct for the runtime and serializes it into a JSON + /// blob. It returns a `Vec` containing the JSON representation of the default `RuntimeGenesisConfig`. fn create_default_config() -> sp_std::vec::Vec; - /// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage. + /// Build `RuntimeGenesisConfig` from a JSON blob not using any defaults and store it in the storage. /// - /// This function deserializes the full `GenesisConfig` from the given JSON blob and puts it into the storage. + /// This function deserializes the full `RuntimeGenesisConfig` from the given JSON blob and puts it into the storage. /// If the provided JSON blob is incorrect or incomplete or the deserialization fails, an error is returned. /// It is recommended to log any errors encountered during the process. /// - /// Please note that provided json blob must contain all `GenesisConfig` fields, no defaults will be used. + /// Please note that provided json blob must contain all `RuntimeGenesisConfig` fields, no defaults will be used. fn build_config(json: sp_std::vec::Vec) -> Result; } } -- GitLab From 329c07723614836a05b1d9158e1a8fdea6fb6508 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 5 Mar 2024 18:16:24 +0100 Subject: [PATCH 46/86] [FRAME] Use 'ready' pages in XCMP suspend logic (#2393) Changes: - `QueueFootprint` gets a new field; `ready_pages` that contains the non-overweight and not yet processed pages. - `XCMP` queue pallet is change to use the `ready_pages` instead of `pages` to calculate the channel suspension thresholds. This should give the XCMP queue pallet a more correct view of when to suspend channels. --------- Signed-off-by: Oliver Tale-Yazdi --- cumulus/pallets/xcmp-queue/src/lib.rs | 6 +++--- cumulus/pallets/xcmp-queue/src/mock.rs | 1 + prdoc/pr_2393.prdoc | 16 ++++++++++++++++ .../frame/message-queue/src/integration_test.rs | 5 +++++ substrate/frame/message-queue/src/lib.rs | 9 +++++++-- substrate/frame/message-queue/src/mock.rs | 4 ++-- substrate/frame/message-queue/src/tests.rs | 13 +++++++++---- substrate/frame/support/src/traits/messages.rs | 2 ++ 8 files changed, 45 insertions(+), 11 deletions(-) create mode 100644 prdoc/pr_2393.prdoc diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 5b900769622..e92169be16b 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -600,7 +600,7 @@ impl Pallet { let QueueConfigData { drop_threshold, .. } = >::get(); let fp = T::XcmpQueue::footprint(sender); // Assume that it will not fit into the current page: - let new_pages = fp.pages.saturating_add(1); + let new_pages = fp.ready_pages.saturating_add(1); if new_pages > drop_threshold { // This should not happen since the channel should have been suspended in // [`on_queue_changed`]. 
@@ -663,12 +663,12 @@ impl OnQueueChanged for Pallet { let mut suspended_channels = >::get(); let suspended = suspended_channels.contains(¶); - if suspended && fp.pages <= resume_threshold { + if suspended && fp.ready_pages <= resume_threshold { Self::send_signal(para, ChannelSignal::Resume); suspended_channels.remove(¶); >::put(suspended_channels); - } else if !suspended && fp.pages >= suspend_threshold { + } else if !suspended && fp.ready_pages >= suspend_threshold { log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); Self::send_signal(para, ChannelSignal::Suspend); diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 08ab58ce816..1fb88cafd93 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -247,6 +247,7 @@ impl> EnqueueMessage for EnqueueToLocalStorage } } footprint.pages = footprint.storage.size as u32 / 16; // Number does not matter + footprint.ready_pages = footprint.pages; footprint } } diff --git a/prdoc/pr_2393.prdoc b/prdoc/pr_2393.prdoc new file mode 100644 index 00000000000..708d017fafd --- /dev/null +++ b/prdoc/pr_2393.prdoc @@ -0,0 +1,16 @@ +title: "[XCMP] Use the number of 'ready' pages in XCMP suspend logic" + +doc: + - audience: Runtime Dev + description: | + Semantics of the suspension logic in the XCMP queue pallet change from using the number of + total pages to the number of 'ready' pages. The number of ready pages is now also exposed by + the `MessageQueue` pallet to downstream via the queue `footprint`. + +crates: + - name: cumulus-pallet-xcmp-queue + bump: patch + - name: pallet-message-queue + bump: patch + - name: frame-support + bump: major diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index cc3da6ebdc6..ce8eb80805a 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -330,6 +330,11 @@ fn process_some_messages(num_msgs: u32) { ServiceWeight::set(Some(weight)); let consumed = next_block(); + for origin in BookStateFor::::iter_keys() { + let fp = MessageQueue::footprint(origin); + assert_eq!(fp.pages, fp.ready_pages); + } + assert_eq!(consumed, weight, "\n{}", MessageQueue::debug_info()); assert_eq!(NumMessagesProcessed::take(), num_msgs as usize); } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 07eb0041985..61028057394 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -208,8 +208,9 @@ use frame_support::{ defensive, pallet_prelude::*, traits::{ - Defensive, DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, - ProcessMessage, ProcessMessageError, QueueFootprint, QueuePausedQuery, ServiceQueues, + Defensive, DefensiveSaturating, DefensiveTruncateFrom, EnqueueMessage, + ExecuteOverweightError, Footprint, ProcessMessage, ProcessMessageError, QueueFootprint, + QueuePausedQuery, ServiceQueues, }, BoundedSlice, CloneNoBound, DefaultNoBound, }; @@ -442,6 +443,7 @@ impl From> for QueueFootprint { fn from(book: BookState) -> Self { QueueFootprint { pages: book.count, + ready_pages: book.end.defensive_saturating_sub(book.begin), storage: Footprint { count: book.message_count, size: book.size }, } } @@ -1281,6 +1283,9 @@ impl Pallet { ensure!(book.message_count < 1 << 30, "Likely overflow or corruption"); ensure!(book.size < 1 << 30, "Likely overflow or corruption"); 
ensure!(book.count < 1 << 30, "Likely overflow or corruption"); + + let fp: QueueFootprint = book.into(); + ensure!(fp.ready_pages <= fp.pages, "There cannot be more ready than total pages"); } //loop around this origin diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index a46fa31df3e..d176a981ca8 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -355,8 +355,8 @@ pub fn num_overweight_enqueued_events() -> u32 { .count() as u32 } -pub fn fp(pages: u32, count: u64, size: u64) -> QueueFootprint { - QueueFootprint { storage: Footprint { count, size }, pages } +pub fn fp(pages: u32, ready_pages: u32, count: u64, size: u64) -> QueueFootprint { + QueueFootprint { storage: Footprint { count, size }, pages, ready_pages } } /// A random seed that can be overwritten with `MQ_SEED`. diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index 86a8b79fe8b..dbef94b3b10 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -1064,13 +1064,13 @@ fn footprint_num_pages_works() { MessageQueue::enqueue_message(msg("weight=2"), Here); MessageQueue::enqueue_message(msg("weight=3"), Here); - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 2, 16)); // Mark the messages as overweight. assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); assert_eq!(System::events().len(), 2); - // Overweight does not change the footprint. - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 16)); + // `ready_pages` decreases but `page` count does not. + assert_eq!(MessageQueue::footprint(Here), fp(2, 0, 2, 16)); // Now execute the second message. assert_eq!( @@ -1078,7 +1078,7 @@ fn footprint_num_pages_works() { .unwrap(), 3.into_weight() ); - assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 8)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 8)); // And the first one: assert_eq!( ::execute_overweight(2.into_weight(), (Here, 0, 0)) @@ -1086,6 +1086,11 @@ fn footprint_num_pages_works() { 2.into_weight() ); assert_eq!(MessageQueue::footprint(Here), Default::default()); + assert_eq!(MessageQueue::footprint(Here), fp(0, 0, 0, 0)); + + // `ready_pages` and normal `pages` increases again: + MessageQueue::enqueue_message(msg("weight=3"), Here); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 1, 8)); }) } diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 995ac4f7179..f3d893bcc1d 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -123,6 +123,8 @@ impl ServiceQueues for NoopServiceQueues { pub struct QueueFootprint { /// The number of pages in the queue (including overweight pages). pub pages: u32, + /// The number of pages that are ready (not yet processed and also not overweight). + pub ready_pages: u32, /// The storage footprint of the queue (including overweight messages). 
pub storage: Footprint, } -- GitLab From 8f8297e9debe79139e3a41be8b54a5fcd603f783 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 6 Mar 2024 01:49:18 +0100 Subject: [PATCH 47/86] Permissioned contract deployment (#3377) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes: #3196 --------- Co-authored-by: Alexander Theißen Co-authored-by: PG Herveou --- .../contracts-rococo/src/contracts.rs | 3 + prdoc/pr_3377.prdoc | 14 ++ substrate/bin/node/runtime/src/lib.rs | 2 + .../src/parachain/contracts_config.rs | 4 +- substrate/frame/contracts/src/lib.rs | 35 ++++- substrate/frame/contracts/src/tests.rs | 131 +++++++++++++++++- 6 files changed, 182 insertions(+), 7 deletions(-) create mode 100644 prdoc/pr_3377.prdoc diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 681b95ce6a5..171ac6a9528 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -21,6 +21,7 @@ use frame_support::{ parameter_types, traits::{ConstBool, ConstU32, Nothing}, }; +use frame_system::EnsureSigned; use pallet_contracts::{ weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; @@ -65,6 +66,8 @@ impl Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; diff --git a/prdoc/pr_3377.prdoc b/prdoc/pr_3377.prdoc new file mode 100644 index 00000000000..8e5b3935512 --- /dev/null +++ b/prdoc/pr_3377.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Permissioned contract deployment + +doc: + - audience: Runtime Dev + description: | + This PR introduces two new config types that specify the origins allowed to + upload and instantiate contract code. However, this check is not enforced when + a contract instantiates another contract. 
+ +crates: +- name: pallet-contracts diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index f4615802515..18ce13cff80 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1365,6 +1365,8 @@ impl pallet_contracts::Config for Runtime { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(not(feature = "runtime-benchmarks"))] diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index 3f26c6f372e..3c06131dd60 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -25,7 +25,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Contains, Randomness}, weights::Weight, }; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_system::{pallet_prelude::BlockNumberFor, EnsureSigned}; use pallet_xcm::BalanceOf; use sp_runtime::{traits::Convert, Perbill}; @@ -90,6 +90,8 @@ impl pallet_contracts::Config for Runtime { type Schedule = Schedule; type Time = super::Timestamp; type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; type WeightInfo = (); type WeightPrice = Self; type Debug = (); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 32d789a458f..f941f152ffe 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -379,6 +379,24 @@ pub mod pallet { #[pallet::constant] type MaxDebugBufferLen: Get; + /// Origin allowed to upload code. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to upload contract + /// code. + type UploadOrigin: EnsureOrigin; + + /// Origin allowed to instantiate code. + /// + /// # Note + /// + /// This is not enforced when a contract instantiates another contract. The + /// [`Self::UploadOrigin`] should make sure that no code is deployed that does unwanted + /// instantiations. + /// + /// By default, it is safe to set this to `EnsureSigned`, allowing anyone to instantiate + /// contract code. + type InstantiateOrigin: EnsureOrigin; + /// Overarching hold reason. type RuntimeHoldReason: From; @@ -636,7 +654,7 @@ pub mod pallet { determinism: Determinism, ) -> DispatchResult { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + let origin = T::UploadOrigin::ensure_origin(origin)?; Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) .map(|_| ()) } @@ -785,11 +803,17 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; - let origin = ensure_signed(origin)?; + + // These two origins will usually be the same; however, we treat them as separate since + // it is possible for the `Success` value of `UploadOrigin` and `InstantiateOrigin` to + // differ. 
+ let upload_origin = T::UploadOrigin::ensure_origin(origin.clone())?; + let instantiate_origin = T::InstantiateOrigin::ensure_origin(origin)?; + let code_len = code.len() as u32; let (module, upload_deposit) = Self::try_upload_code( - origin.clone(), + upload_origin, code, storage_deposit_limit.clone().map(Into::into), Determinism::Enforced, @@ -803,7 +827,7 @@ pub mod pallet { let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_account_id(origin), + origin: Origin::from_account_id(instantiate_origin), value, data, gas_limit, @@ -844,10 +868,11 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { Migration::::ensure_migrated()?; + let origin = T::InstantiateOrigin::ensure_origin(origin)?; let data_len = data.len() as u32; let salt_len = salt.len() as u32; let common = CommonInput { - origin: Origin::from_runtime_origin(origin)?, + origin: Origin::from_account_id(origin), value, data, gas_limit, diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 2d1c5b315a3..2f8f6d4a437 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -45,6 +45,7 @@ use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, + pallet_prelude::EnsureOrigin, parameter_types, storage::child, traits::{ @@ -434,6 +435,33 @@ impl Contains for TestFilter { } } +parameter_types! { + pub static UploadAccount: Option<::AccountId> = None; + pub static InstantiateAccount: Option<::AccountId> = None; +} + +pub struct EnsureAccount(sp_std::marker::PhantomData<(T, A)>); +impl>>> + EnsureOrigin<::RuntimeOrigin> for EnsureAccount +where + ::AccountId: From, +{ + type Success = T::AccountId; + + fn try_origin(o: T::RuntimeOrigin) -> Result { + let who = as EnsureOrigin<_>>::try_origin(o.clone())?; + if matches!(A::get(), Some(a) if who != a) { + return Err(o) + } + + Ok(who) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Err(()) + } +} parameter_types! { pub static UnstableInterface: bool = true; } @@ -458,6 +486,8 @@ impl Config for Test { type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; type UnsafeUnstableInterface = UnstableInterface; + type UploadOrigin = EnsureAccount; + type InstantiateOrigin = EnsureAccount; type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type RuntimeHoldReason = RuntimeHoldReason; type Migrations = crate::migration::codegen::BenchMigrations; @@ -5924,7 +5954,106 @@ fn root_cannot_instantiate() { vec![], vec![], ), - DispatchError::RootNotAllowed + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn only_upload_origin_can_upload() { + let (wasm, _) = compile_module::("dummy").unwrap(); + UploadAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::root(), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(BOB), + wasm.clone(), + None, + Determinism::Enforced, + ), + DispatchError::BadOrigin + ); + + // Only alice is allowed to upload contract code. 
+ assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Enforced, + )); + }); +} + +#[test] +fn only_instantiation_origin_can_instantiate() { + let (code, code_hash) = compile_module::("dummy").unwrap(); + InstantiateAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::root(), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code.clone(), + vec![], + vec![], + ), + DispatchError::BadOrigin + ); + + // Only Alice can instantiate + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + code, + vec![], + vec![], + ),); + + // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. + assert_err_ignore_postinfo!( + Contracts::instantiate( + RuntimeOrigin::signed(BOB), + 0, + GAS_LIMIT, + None, + code_hash, + vec![], + vec![], + ), + DispatchError::BadOrigin ); }); } -- GitLab From 475e7a147676a4e7a9d255ddc7969dd35ea22882 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 6 Mar 2024 07:47:33 +0100 Subject: [PATCH 48/86] Contracts: Charge min amount when processing deletion queue (#2934) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexander Theißen --- substrate/frame/contracts/src/storage.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 3304166607d..52c5150ca21 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -322,7 +322,8 @@ impl ContractInfo { KillStorageResult::SomeRemaining(_) => return weight_limit, KillStorageResult::AllRemoved(keys_removed) => { entry.remove(); - remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed); + // charge at least one key even if none were removed. + remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed.max(1)); }, }; } -- GitLab From f5bf4654e0b9cd089d1ca38ff3d692543ed555d3 Mon Sep 17 00:00:00 2001 From: Usama Ali Date: Wed, 6 Mar 2024 18:18:56 +0500 Subject: [PATCH 49/86] FIX: Make `sc-network-sync` types Public (#3586) This PR makes `sc-network-sync` types public following [this](https://github.com/paritytech/polkadot-sdk/issues/3556) issue. Fixes #3556 --- substrate/client/network/sync/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index e23a23e735d..9f6c0f45d08 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -28,7 +28,7 @@ mod justification_requests; mod pending_responses; mod request_metrics; mod schema; -mod types; +pub mod types; pub mod block_relay_protocol; pub mod block_request_handler; -- GitLab From adce09057dc3853a62af8b399fe046c60a2777ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:54:36 +0000 Subject: [PATCH 50/86] sc-manual-seal: don't spawn threads in tests (#3595) Hopefully helps with test flakiness. 
--- .../client/consensus/manual-seal/src/lib.rs | 68 ++++++++----------- 1 file changed, 27 insertions(+), 41 deletions(-) diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index e3608f6716c..f04c0d42d60 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -429,7 +429,9 @@ mod tests { sender, } }); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -438,12 +440,8 @@ mod tests { select_chain, create_inherent_data_providers: |_, _| async { Ok(()) }, consensus_data_provider: None, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -507,7 +505,8 @@ mod tests { } }); - let future_instant_seal = run_manual_seal(ManualSealParams { + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), commands_stream, env, @@ -516,24 +515,16 @@ mod tests { select_chain, create_inherent_data_providers: |_, _| async { Ok(()) }, consensus_data_provider: None, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future_instant_seal); - }); + })); let delay_sec = 5; - let future_delayed_finalize = run_delayed_finalize(DelayedFinalizeParams { + + // spawn the background finality task + tokio::spawn(run_delayed_finalize(DelayedFinalizeParams { client: client.clone(), delay_sec, spawn_handle: spawner, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future_delayed_finalize); - }); + })); let mut finality_stream = client.finality_notification_stream(); // submit a transaction to pool. @@ -589,7 +580,9 @@ mod tests { // this test checks that blocks are created as soon as an engine command is sent over the // stream. let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -598,12 +591,8 @@ mod tests { select_chain, consensus_data_provider: None, create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -675,7 +664,9 @@ mod tests { // this test checks that blocks are created as soon as an engine command is sent over the // stream. 
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -684,12 +675,8 @@ mod tests { select_chain, consensus_data_provider: None, create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); + })); + // submit a transaction to pool. let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported @@ -781,7 +768,9 @@ mod tests { let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal(ManualSealParams { + + // spawn the background authorship task + tokio::spawn(run_manual_seal(ManualSealParams { block_import: client.clone(), env, client: client.clone(), @@ -791,11 +780,8 @@ mod tests { // use a provider that pushes some post digest data consensus_data_provider: Some(Box::new(TestDigestProvider { _client: client.clone() })), create_inherent_data_providers: |_, _| async { Ok(()) }, - }); - std::thread::spawn(|| { - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(future); - }); + })); + let (tx, rx) = futures::channel::oneshot::channel(); sink.send(EngineCommand::SealNewBlock { parent_hash: None, -- GitLab From f2f4b154d7641ef3c237a0d7df4b77280b5a81d0 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 6 Mar 2024 17:52:30 +0200 Subject: [PATCH 51/86] chainHead/follow: Provide multiple block hashes to the initialized event (#3445) This PR extends the Initialized event of the chainHead_follow subscription. Now, the event provides multiple finalized block hashes. This information allows clients that are disconnected, and that want to reconnect, to not lose information about the state of the chain. At the moment, the spec encourages servers to provide at least 1 minute of finalized blocks (~10 blocks). The users are responsible for unpinning these blocks at a later time. This PR tries to report at least 1 finalized block and at most 16 blocks, if they are available. Closes: https://github.com/paritytech/polkadot-sdk/issues/3432 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile Co-authored-by: Niklas Adolfsson --- .../src/chain_head/chain_head_follow.rs | 63 +++++++++++++++---- .../rpc-spec-v2/src/chain_head/event.rs | 16 ++--- .../rpc-spec-v2/src/chain_head/tests.rs | 22 ++++--- 3 files changed, 73 insertions(+), 28 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index e94374aebd9..afa99f3aa16 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -42,7 +42,14 @@ use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::HashSet, sync::Arc}; +use std::{ + collections::{HashSet, VecDeque}, + sync::Arc, +}; + +/// The maximum number of finalized blocks provided by the +/// `Initialized` event. 
+const MAX_FINALIZED_BLOCKS: usize = 16; use super::subscription::InsertedSubscriptionData; @@ -95,6 +102,8 @@ struct InitialBlocks { /// /// It is a tuple of (block hash, parent hash). finalized_block_descendants: Vec<(Block::Hash, Block::Hash)>, + /// Hashes of the last finalized blocks + finalized_block_hashes: VecDeque, /// Blocks that should not be reported as pruned by the `Finalized` event. /// /// Substrate database will perform the pruning of height N at @@ -178,13 +187,14 @@ where } /// Get the in-memory blocks of the client, starting from the provided finalized hash. + /// + /// The reported blocks are pinned by this function. fn get_init_blocks_with_forks( &self, - startup_point: &StartupPoint, + finalized: Block::Hash, ) -> Result, SubscriptionManagementError> { let blockchain = self.backend.blockchain(); let leaves = blockchain.leaves()?; - let finalized = startup_point.finalized_hash; let mut pruned_forks = HashSet::new(); let mut finalized_block_descendants = Vec::new(); let mut unique_descendants = HashSet::new(); @@ -198,17 +208,47 @@ where // Ensure a `NewBlock` event is generated for all children of the // finalized block. Describe the tree route as (child_node, parent_node) // Note: the order of elements matters here. - let parents = std::iter::once(finalized).chain(blocks.clone()); + let mut parent = finalized; + for child in blocks { + let pair = (child, parent); - for pair in blocks.zip(parents) { if unique_descendants.insert(pair) { + // The finalized block is pinned below. + self.sub_handle.pin_block(&self.sub_id, child)?; finalized_block_descendants.push(pair); } + + parent = child; } } } - Ok(InitialBlocks { finalized_block_descendants, pruned_forks }) + let mut current_block = finalized; + // The header of the finalized block must not be pruned. + let Some(header) = blockchain.header(current_block)? else { + return Err(SubscriptionManagementError::BlockHeaderAbsent); + }; + + // Report at most `MAX_FINALIZED_BLOCKS`. Note: The node might not have that many blocks. + let mut finalized_block_hashes = VecDeque::with_capacity(MAX_FINALIZED_BLOCKS); + + // Pin the finalized block. + self.sub_handle.pin_block(&self.sub_id, current_block)?; + finalized_block_hashes.push_front(current_block); + current_block = *header.parent_hash(); + + for _ in 0..MAX_FINALIZED_BLOCKS - 1 { + let Ok(Some(header)) = blockchain.header(current_block) else { break }; + // Block cannot be reported if pinning fails. + if self.sub_handle.pin_block(&self.sub_id, current_block).is_err() { + break + }; + + finalized_block_hashes.push_front(current_block); + current_block = *header.parent_hash(); + } + + Ok(InitialBlocks { finalized_block_descendants, finalized_block_hashes, pruned_forks }) } /// Generate the initial events reported by the RPC `follow` method. @@ -220,18 +260,17 @@ where startup_point: &StartupPoint, ) -> Result<(Vec>, HashSet), SubscriptionManagementError> { - let init = self.get_init_blocks_with_forks(startup_point)?; + let init = self.get_init_blocks_with_forks(startup_point.finalized_hash)?; + // The initialized event is the first one sent. let initial_blocks = init.finalized_block_descendants; + let finalized_block_hashes = init.finalized_block_hashes; - // The initialized event is the first one sent. 
let finalized_block_hash = startup_point.finalized_hash; - self.sub_handle.pin_block(&self.sub_id, finalized_block_hash)?; - let finalized_block_runtime = self.generate_runtime_event(finalized_block_hash, None); let initialized_event = FollowEvent::Initialized(Initialized { - finalized_block_hash, + finalized_block_hashes: finalized_block_hashes.into(), finalized_block_runtime, with_runtime: self.with_runtime, }); @@ -240,8 +279,6 @@ where finalized_block_descendants.push(initialized_event); for (child, parent) in initial_blocks.into_iter() { - self.sub_handle.pin_block(&self.sub_id, child)?; - let new_runtime = self.generate_runtime_event(child, Some(parent)); let event = FollowEvent::NewBlock(NewBlock { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index 560ab87eab4..e0c804d16eb 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -111,8 +111,8 @@ impl From for RuntimeEvent { #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Initialized { - /// The hash of the latest finalized block. - pub finalized_block_hash: Hash, + /// The hash of the lastest finalized blocks. + pub finalized_block_hashes: Vec, /// The runtime version of the finalized block. /// /// # Note @@ -135,12 +135,12 @@ impl Serialize for Initialized { { if self.with_runtime { let mut state = serializer.serialize_struct("Initialized", 2)?; - state.serialize_field("finalizedBlockHash", &self.finalized_block_hash)?; + state.serialize_field("finalizedBlockHashes", &self.finalized_block_hashes)?; state.serialize_field("finalizedBlockRuntime", &self.finalized_block_runtime)?; state.end() } else { let mut state = serializer.serialize_struct("Initialized", 1)?; - state.serialize_field("finalizedBlockHash", &self.finalized_block_hash)?; + state.serialize_field("finalizedBlockHashes", &self.finalized_block_hashes)?; state.end() } } @@ -348,13 +348,13 @@ mod tests { fn follow_initialized_event_no_updates() { // Runtime flag is false. 
let event: FollowEvent = FollowEvent::Initialized(Initialized { - finalized_block_hash: "0x1".into(), + finalized_block_hashes: vec!["0x1".into()], finalized_block_runtime: None, with_runtime: false, }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"initialized","finalizedBlockHash":"0x1"}"#; + let exp = r#"{"event":"initialized","finalizedBlockHashes":["0x1"]}"#; assert_eq!(ser, exp); let event_dec: FollowEvent = serde_json::from_str(exp).unwrap(); @@ -373,7 +373,7 @@ mod tests { let runtime_event = RuntimeEvent::Valid(RuntimeVersionEvent { spec: runtime.into() }); let mut initialized = Initialized { - finalized_block_hash: "0x1".into(), + finalized_block_hashes: vec!["0x1".into()], finalized_block_runtime: Some(runtime_event), with_runtime: true, }; @@ -381,7 +381,7 @@ mod tests { let ser = serde_json::to_string(&event).unwrap(); let exp = concat!( - r#"{"event":"initialized","finalizedBlockHash":"0x1","#, + r#"{"event":"initialized","finalizedBlockHashes":["0x1"],"#, r#""finalizedBlockRuntime":{"type":"valid","spec":{"specName":"ABC","implName":"Impl","#, r#""specVersion":1,"implVersion":0,"apis":{},"transactionVersion":0}}}"#, ); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 89d8c4ce271..4d9dfb24e0a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -173,7 +173,7 @@ async fn follow_subscription_produces_blocks() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -255,7 +255,7 @@ async fn follow_with_runtime() { Some(RuntimeEvent::Valid(RuntimeVersionEvent { spec: runtime.clone().into() })); // Runtime must always be reported with the first event. let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime, with_runtime: false, }); @@ -1344,7 +1344,7 @@ async fn follow_generates_initial_blocks() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -1896,7 +1896,7 @@ async fn follow_prune_best_block() { // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -2081,6 +2081,7 @@ async fn follow_forks_pruned_block() { // ^^^ finalized // -> block 1 -> block 2_f -> block 3_f // + let finalized_hash = client.info().finalized_hash; let block_1 = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -2090,6 +2091,7 @@ async fn follow_forks_pruned_block() { .build() .unwrap() .block; + let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); let block_2 = BlockBuilderBuilder::new(&*client) @@ -2100,6 +2102,7 @@ async fn follow_forks_pruned_block() { .build() .unwrap() .block; + let block_2_hash = block_2.header.hash(); client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); let block_3 = BlockBuilderBuilder::new(&*client) @@ -2156,7 +2159,12 @@ async fn follow_forks_pruned_block() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", block_3_hash), + finalized_block_hashes: vec![ + format!("{:?}", finalized_hash), + format!("{:?}", block_1_hash), + format!("{:?}", block_2_hash), + format!("{:?}", block_3_hash), + ], finalized_block_runtime: None, with_runtime: false, }); @@ -2310,7 +2318,7 @@ async fn follow_report_multiple_pruned_block() { // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); @@ -2632,7 +2640,7 @@ async fn follow_finalized_before_new_block() { let finalized_hash = client.info().finalized_hash; let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Initialized(Initialized { - finalized_block_hash: format!("{:?}", finalized_hash), + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], finalized_block_runtime: None, with_runtime: false, }); -- GitLab From 9952786e1b688109b1571248042d71b82bbe2256 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 6 Mar 2024 17:35:55 +0100 Subject: [PATCH 52/86] Contracts upload with Determinism::Enforced when possible (#3540) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Cyrill Leutwiler Co-authored-by: command-bot <> Co-authored-by: Alexander Theißen --- prdoc/pr_3540.prdoc | 10 + .../frame/contracts/src/benchmarking/code.rs | 19 +- .../frame/contracts/src/benchmarking/mod.rs | 27 +- substrate/frame/contracts/src/lib.rs | 11 +- substrate/frame/contracts/src/tests.rs | 20 + substrate/frame/contracts/src/wasm/mod.rs | 5 + substrate/frame/contracts/src/wasm/prepare.rs | 25 +- substrate/frame/contracts/src/weights.rs | 1275 +++++++++-------- 8 files changed, 756 insertions(+), 636 deletions(-) create mode 100644 prdoc/pr_3540.prdoc diff --git a/prdoc/pr_3540.prdoc b/prdoc/pr_3540.prdoc new file mode 100644 index 00000000000..d0a91882b6b --- /dev/null +++ b/prdoc/pr_3540.prdoc @@ -0,0 +1,10 @@ +title: "[pallet-contracts] Only allow non-deterministic code to be uploaded with Determinism::Relaxed" + +doc: + - 
audience: Runtime Dev + description: | + The `upload_code` extrinsic, will now only allow non-deterministic code to be uploaded with the `Determinism::Relaxed` flag. + This prevent an attacker from uploading "deterministic" code with the `Determinism::Relaxed` flag, preventing the code to be instantiated for on-chain execution. + +crates: + - name: pallet-contracts diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 644c2abf0a8..96ce11f8c41 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -26,7 +26,7 @@ use crate::Config; use frame_support::traits::Get; -use sp_runtime::traits::Hash; +use sp_runtime::{traits::Hash, Saturating}; use sp_std::{borrow::ToOwned, prelude::*}; use wasm_instrument::parity_wasm::{ builder, @@ -262,22 +262,25 @@ impl WasmModule { /// `instantiate_with_code` for different sizes of wasm modules. The generated module maximizes /// instrumentation runtime by nesting blocks as deeply as possible given the byte budget. /// `code_location`: Whether to place the code into `deploy` or `call`. - pub fn sized(target_bytes: u32, code_location: Location) -> Self { + pub fn sized(target_bytes: u32, code_location: Location, use_float: bool) -> Self { use self::elements::Instruction::{End, GetLocal, If, Return}; // Base size of a contract is 63 bytes and each expansion adds 6 bytes. // We do one expansion less to account for the code section and function body // size fields inside the binary wasm module representation which are leb128 encoded // and therefore grow in size when the contract grows. We are not allowed to overshoot // because of the maximum code size that is enforced by `instantiate_with_code`. - let expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); + let mut expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); const EXPANSION: [Instruction; 4] = [GetLocal(0), If(BlockType::NoResult), Return, End]; + let mut locals = vec![Local::new(1, ValueType::I32)]; + if use_float { + locals.push(Local::new(1, ValueType::F32)); + locals.push(Local::new(2, ValueType::F32)); + locals.push(Local::new(3, ValueType::F32)); + expansions.saturating_dec(); + } let mut module = ModuleDefinition { memory: Some(ImportedMemory::max::()), ..Default::default() }; - let body = Some(body::repeated_with_locals( - &[Local::new(1, ValueType::I32)], - expansions, - &EXPANSION, - )); + let body = Some(body::repeated_with_locals(&locals, expansions, &EXPANSION)); match code_location { Location::Call => module.call_body = body, Location::Deploy => module.deploy_body = body, diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 4b6ff63c6f5..2ecd56b412d 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -362,7 +362,7 @@ benchmarks! { call_with_code_per_byte { let c in 0 .. T::MaxCodeLen::get(); let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::sized(c, Location::Deploy), vec![], + whitelisted_caller(), WasmModule::sized(c, Location::Deploy, false), vec![], )?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); @@ -389,7 +389,7 @@ benchmarks! { let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); - let WasmModule { code, hash, .. 
} = WasmModule::::sized(c, Location::Call); + let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &input, &salt); }: _(origin, value, Weight::MAX, None, code, input, salt) @@ -468,19 +468,36 @@ benchmarks! { // It creates a maximum number of metering blocks per byte. // `c`: Size of the code in bytes. #[pov_mode = Measured] - upload_code { + upload_code_determinism_enforced { let c in 0 .. T::MaxCodeLen::get(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); - let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); + let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, false); let origin = RawOrigin::Signed(caller.clone()); - }: _(origin, code, None, Determinism::Enforced) + }: upload_code(origin, code, None, Determinism::Enforced) verify { // uploading the code reserves some balance in the callers account assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); assert!(>::code_exists(&hash)); } + // Uploading code with [`Determinism::Relaxed`] should be more expensive than uploading code with [`Determinism::Enforced`], + // as we always try to save the code with [`Determinism::Enforced`] first. + #[pov_mode = Measured] + upload_code_determinism_relaxed { + let c in 0 .. T::MaxCodeLen::get(); + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call, true); + let origin = RawOrigin::Signed(caller.clone()); + }: upload_code(origin, code, None, Determinism::Relaxed) + verify { + assert!(T::Currency::total_balance_on_hold(&caller) > 0u32.into()); + assert!(>::code_exists(&hash)); + // Ensure that the benchmark follows the most expensive path, i.e., the code is saved with [`Determinism::Relaxed`] after trying to save it with [`Determinism::Enforced`]. + assert_eq!(CodeInfoOf::::get(&hash).unwrap().determinism(), Determinism::Relaxed); + } + // Removing code does not depend on the size of the contract because all the information // needed to verify the removal claim (refcount, owner) is stored in a separate storage // item (`CodeInfoOf`). diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index f941f152ffe..0511bcf7f3f 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -645,8 +645,17 @@ pub mod pallet { /// To avoid this situation a constructor could employ access control so that it can /// only be instantiated by permissioned entities. The same is true when uploading /// through [`Self::instantiate_with_code`]. + /// + /// Use [`Determinism::Relaxed`] exclusively for non-deterministic code. If the uploaded + /// code is deterministic, specifying [`Determinism::Relaxed`] will be disregarded and + /// result in higher gas costs. 
#[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] + #[pallet::weight( + match determinism { + Determinism::Enforced => T::WeightInfo::upload_code_determinism_enforced(code.len() as u32), + Determinism::Relaxed => T::WeightInfo::upload_code_determinism_relaxed(code.len() as u32), + } + )] pub fn upload_code( origin: OriginFor, code: Vec, diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 2f8f6d4a437..011b2b25b2b 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -5185,6 +5185,26 @@ fn deposit_limit_honors_min_leftover() { }); } +#[test] +fn upload_should_enforce_deterministic_mode_when_possible() { + let upload = |fixture, determinism| { + let (wasm, code_hash) = compile_module::(fixture).unwrap(); + ExtBuilder::default() + .build() + .execute_with(|| -> Result { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + Contracts::bare_upload_code(ALICE, wasm, None, determinism)?; + let info = CodeInfoOf::::get(code_hash).unwrap(); + Ok(info.determinism()) + }) + }; + + assert_eq!(upload("dummy", Determinism::Enforced), Ok(Determinism::Enforced)); + assert_eq!(upload("dummy", Determinism::Relaxed), Ok(Determinism::Enforced)); + assert_eq!(upload("float_instruction", Determinism::Relaxed), Ok(Determinism::Relaxed)); + assert!(upload("float_instruction", Determinism::Enforced).is_err()); +} + #[test] fn cannot_instantiate_indeterministic_code() { let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 34b45cba5d2..87e92cd72e8 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -313,6 +313,11 @@ impl CodeInfo { } } + /// Returns the determinism of the module. + pub fn determinism(&self) -> Determinism { + self.determinism + } + /// Returns reference count of the module. pub fn refcount(&self) -> u64 { self.refcount diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 5cdf5600fcd..5efea8c3a23 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -79,7 +79,10 @@ impl LoadedModule { } let engine = Engine::new(&config); - let module = Module::new(&engine, code).map_err(|_| "Can't load the module into wasmi!")?; + let module = Module::new(&engine, code).map_err(|err| { + log::debug!(target: LOG_TARGET, "Module creation failed: {:?}", err); + "Can't load the module into wasmi!" + })?; // Return a `LoadedModule` instance with // __valid__ module. @@ -220,7 +223,7 @@ impl LoadedModule { fn validate( code: &[u8], schedule: &Schedule, - determinism: Determinism, + determinism: &mut Determinism, ) -> Result<(), (DispatchError, &'static str)> where E: Environment<()>, @@ -229,7 +232,17 @@ where (|| { // We check that the module is generally valid, // and does not have restricted WebAssembly features, here. - let contract_module = LoadedModule::new::(code, determinism, None)?; + let contract_module = match *determinism { + Determinism::Relaxed => + if let Ok(module) = LoadedModule::new::(code, Determinism::Enforced, None) { + *determinism = Determinism::Enforced; + module + } else { + LoadedModule::new::(code, Determinism::Relaxed, None)? 
+ }, + Determinism::Enforced => LoadedModule::new::(code, Determinism::Enforced, None)?, + }; + // The we check that module satisfies constraints the pallet puts on contracts. contract_module.scan_exports()?; contract_module.scan_imports::(schedule)?; @@ -252,7 +265,7 @@ where &code, (), schedule, - determinism, + *determinism, stack_limits, AllowDeprecatedInterface::No, ) @@ -276,13 +289,13 @@ pub fn prepare( code: CodeVec, schedule: &Schedule, owner: AccountIdOf, - determinism: Determinism, + mut determinism: Determinism, ) -> Result, (DispatchError, &'static str)> where E: Environment<()>, T: Config, { - validate::(code.as_ref(), schedule, determinism)?; + validate::(code.as_ref(), schedule, &mut determinism)?; // Calculate deposit for storing contract code and `code_info` in two different storage items. let code_len = code.len() as u32; diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 47ca17c7d80..6e09120c141 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_contracts -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/contracts/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_contracts +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/contracts/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,7 +67,8 @@ pub trait WeightInfo { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight; fn instantiate(i: u32, s: u32, ) -> Weight; fn call() -> Weight; - fn upload_code(c: u32, ) -> Weight; + fn upload_code_determinism_enforced(c: u32, ) -> Weight; + fn upload_code_determinism_relaxed(c: u32, ) -> Weight; fn remove_code() -> Weight; fn set_code() -> Weight; fn seal_caller(r: u32, ) -> Weight; @@ -143,8 +142,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_103_000 picoseconds. - Weight::from_parts(2_260_000, 1627) + // Minimum execution time: 2_054_000 picoseconds. + Weight::from_parts(2_178_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +153,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 Β±0)` // Estimated: `442 + k * (70 Β±0)` - // Minimum execution time: 12_173_000 picoseconds. 
- Weight::from_parts(12_515_000, 442) - // Standard Error: 1_033 - .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) + // Minimum execution time: 12_119_000 picoseconds. + Weight::from_parts(12_536_000, 442) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +170,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 Β±0)` // Estimated: `6149 + c * (1 Β±0)` - // Minimum execution time: 8_054_000 picoseconds. - Weight::from_parts(8_503_485, 6149) + // Minimum execution time: 8_146_000 picoseconds. + Weight::from_parts(8_560_745, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +186,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_966_000 picoseconds. - Weight::from_parts(17_445_000, 6450) + // Minimum execution time: 16_183_000 picoseconds. + Weight::from_parts(16_825_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +200,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 Β±0)` // Estimated: `3635 + k * (1 Β±0)` - // Minimum execution time: 3_643_000 picoseconds. - Weight::from_parts(3_714_000, 3635) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) + // Minimum execution time: 3_645_000 picoseconds. + Weight::from_parts(604_365, 3635) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -225,10 +224,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (1 Β±0)` // Estimated: `6266 + c * (1 Β±0)` - // Minimum execution time: 19_891_000 picoseconds. - Weight::from_parts(19_961_608, 6266) + // Minimum execution time: 19_500_000 picoseconds. + Weight::from_parts(20_074_895, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -239,8 +238,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_483_000 picoseconds. - Weight::from_parts(13_205_000, 6380) + // Minimum execution time: 12_530_000 picoseconds. 
+ Weight::from_parts(13_362_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -254,8 +253,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 42_443_000 picoseconds. - Weight::from_parts(43_420_000, 6292) + // Minimum execution time: 44_275_000 picoseconds. + Weight::from_parts(45_718_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,8 +266,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_282_000 picoseconds. - Weight::from_parts(54_110_000, 6534) + // Minimum execution time: 55_095_000 picoseconds. + Weight::from_parts(58_009_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -278,8 +277,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_539_000 picoseconds. - Weight::from_parts(2_644_000, 1627) + // Minimum execution time: 2_455_000 picoseconds. + Weight::from_parts(2_608_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -291,8 +290,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 9_473_000 picoseconds. - Weight::from_parts(9_907_000, 3631) + // Minimum execution time: 11_207_000 picoseconds. + Weight::from_parts(11_802_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -302,8 +301,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 3_532_000 picoseconds. - Weight::from_parts(3_768_000, 3607) + // Minimum execution time: 4_581_000 picoseconds. + Weight::from_parts(4_781_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -314,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_036_000 picoseconds. - Weight::from_parts(5_208_000, 3632) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_393_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -326,8 +325,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_881_000 picoseconds. - Weight::from_parts(5_149_000, 3607) + // Minimum execution time: 6_025_000 picoseconds. + Weight::from_parts(6_298_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -352,10 +351,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804 + c * (1 Β±0)` // Estimated: `9217 + c * (1 Β±0)` - // Minimum execution time: 280_047_000 picoseconds. - Weight::from_parts(270_065_882, 9217) - // Standard Error: 86 - .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + // Minimum execution time: 294_976_000 picoseconds. 
+ Weight::from_parts(277_235_948, 9217) + // Standard Error: 65 + .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -387,14 +386,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_776_071_000 picoseconds. - Weight::from_parts(706_212_213, 8740) - // Standard Error: 149 - .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + // Minimum execution time: 3_864_558_000 picoseconds. + Weight::from_parts(421_676_889, 8740) + // Standard Error: 188 + .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -424,12 +423,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 1_966_301_000 picoseconds. - Weight::from_parts(376_287_434, 8982) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + // Minimum execution time: 2_069_276_000 picoseconds. + Weight::from_parts(377_210_279, 8982) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -453,8 +452,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 197_571_000 picoseconds. - Weight::from_parts(206_612_000, 9244) + // Minimum execution time: 199_037_000 picoseconds. + Weight::from_parts(213_931_000, 9244) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -471,14 +470,38 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. - fn upload_code(c: u32, ) -> Weight { + fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 266_006_000 picoseconds. - Weight::from_parts(287_700_788, 6085) - // Standard Error: 67 - .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + // Minimum execution time: 278_482_000 picoseconds. 
+ Weight::from_parts(262_753_879, 6085) + // Standard Error: 112 + .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + fn upload_code_determinism_relaxed(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 286_902_000 picoseconds. + Weight::from_parts(269_003_959, 6085) + // Standard Error: 123 + .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -496,8 +519,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 42_714_000 picoseconds. - Weight::from_parts(44_713_000, 3780) + // Minimum execution time: 44_128_000 picoseconds. + Weight::from_parts(46_044_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -513,8 +536,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_516_000 picoseconds. - Weight::from_parts(34_823_000, 8967) + // Minimum execution time: 33_567_000 picoseconds. + Weight::from_parts(35_057_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -539,10 +562,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `869 + r * (6 Β±0)` // Estimated: `9284 + r * (6 Β±0)` - // Minimum execution time: 246_563_000 picoseconds. - Weight::from_parts(274_999_814, 9284) - // Standard Error: 628 - .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + // Minimum execution time: 272_376_000 picoseconds. + Weight::from_parts(284_162_161, 9284) + // Standard Error: 501 + .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -568,10 +591,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `925 + r * (209 Β±0)` // Estimated: `9304 + r * (2684 Β±0)` - // Minimum execution time: 252_236_000 picoseconds. 
- Weight::from_parts(121_390_081, 9304) - // Standard Error: 6_206 - .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + // Minimum execution time: 256_990_000 picoseconds. + Weight::from_parts(107_167_044, 9304) + // Standard Error: 7_545 + .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -598,10 +621,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (213 Β±0)` // Estimated: `9308 + r * (2688 Β±0)` - // Minimum execution time: 269_427_000 picoseconds. - Weight::from_parts(134_879_389, 9308) - // Standard Error: 6_711 - .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + // Minimum execution time: 272_889_000 picoseconds. + Weight::from_parts(110_341_799, 9308) + // Standard Error: 7_881 + .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -628,10 +651,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `876 + r * (6 Β±0)` // Estimated: `9293 + r * (6 Β±0)` - // Minimum execution time: 249_382_000 picoseconds. - Weight::from_parts(266_305_110, 9293) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + // Minimum execution time: 275_027_000 picoseconds. + Weight::from_parts(283_302_223, 9293) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -657,10 +680,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (3 Β±0)` // Estimated: `9282 + r * (3 Β±0)` - // Minimum execution time: 252_145_000 picoseconds. - Weight::from_parts(277_211_668, 9282) - // Standard Error: 371 - .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + // Minimum execution time: 270_137_000 picoseconds. + Weight::from_parts(282_756_362, 9282) + // Standard Error: 384 + .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -684,10 +707,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `756 + r * (3 Β±0)` // Estimated: `9171 + r * (3 Β±0)` - // Minimum execution time: 251_595_000 picoseconds. - Weight::from_parts(268_102_575, 9171) - // Standard Error: 334 - .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + // Minimum execution time: 255_131_000 picoseconds. 
+ Weight::from_parts(269_207_006, 9171) + // Standard Error: 542 + .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -713,10 +736,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `870 + r * (6 Β±0)` // Estimated: `9285 + r * (6 Β±0)` - // Minimum execution time: 251_404_000 picoseconds. - Weight::from_parts(278_747_574, 9285) - // Standard Error: 782 - .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + // Minimum execution time: 272_172_000 picoseconds. + Weight::from_parts(283_747_134, 9285) + // Standard Error: 885 + .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -742,10 +765,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (6 Β±0)` // Estimated: `9284 + r * (6 Β±0)` - // Minimum execution time: 255_649_000 picoseconds. - Weight::from_parts(276_509_641, 9284) - // Standard Error: 1_262 - .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + // Minimum execution time: 263_220_000 picoseconds. + Weight::from_parts(277_062_382, 9284) + // Standard Error: 1_783 + .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -771,10 +794,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1010 + r * (6 Β±0)` // Estimated: `9409 + r * (6 Β±0)` - // Minimum execution time: 267_143_000 picoseconds. - Weight::from_parts(298_166_116, 9409) - // Standard Error: 3_765 - .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + // Minimum execution time: 253_218_000 picoseconds. + Weight::from_parts(282_251_452, 9409) + // Standard Error: 2_367 + .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -800,10 +823,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `880 + r * (6 Β±0)` // Estimated: `9301 + r * (6 Β±0)` - // Minimum execution time: 250_013_000 picoseconds. - Weight::from_parts(281_469_583, 9301) - // Standard Error: 730 - .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + // Minimum execution time: 271_797_000 picoseconds. + Weight::from_parts(288_594_393, 9301) + // Standard Error: 846 + .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -829,10 +852,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `878 + r * (6 Β±0)` // Estimated: `9294 + r * (6 Β±0)` - // Minimum execution time: 256_219_000 picoseconds. 
- Weight::from_parts(275_309_266, 9294) - // Standard Error: 1_199 - .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + // Minimum execution time: 254_997_000 picoseconds. + Weight::from_parts(284_331_894, 9294) + // Standard Error: 885 + .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -858,10 +881,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875 + r * (6 Β±0)` // Estimated: `9297 + r * (6 Β±0)` - // Minimum execution time: 264_644_000 picoseconds. - Weight::from_parts(283_856_744, 9297) - // Standard Error: 623 - .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + // Minimum execution time: 273_845_000 picoseconds. + Weight::from_parts(285_291_242, 9297) + // Standard Error: 690 + .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -887,10 +910,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (6 Β±0)` // Estimated: `9282 + r * (6 Β±0)` - // Minimum execution time: 263_364_000 picoseconds. - Weight::from_parts(281_379_508, 9282) - // Standard Error: 639 - .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + // Minimum execution time: 268_302_000 picoseconds. + Weight::from_parts(280_463_257, 9282) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -918,10 +941,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `940 + r * (14 Β±0)` // Estimated: `9350 + r * (14 Β±0)` - // Minimum execution time: 269_667_000 picoseconds. - Weight::from_parts(284_662_802, 9350) - // Standard Error: 691 - .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + // Minimum execution time: 263_433_000 picoseconds. + Weight::from_parts(290_098_370, 9350) + // Standard Error: 1_905 + .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) @@ -947,10 +970,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `868 + r * (6 Β±0)` // Estimated: `9285 + r * (6 Β±0)` - // Minimum execution time: 267_018_000 picoseconds. - Weight::from_parts(281_842_630, 9285) - // Standard Error: 453 - .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + // Minimum execution time: 273_588_000 picoseconds. 
+ Weight::from_parts(287_133_565, 9285) + // Standard Error: 799 + .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -976,10 +999,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 258_208_000 picoseconds. - Weight::from_parts(227_686_792, 9287) - // Standard Error: 23 - .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) + // Minimum execution time: 257_843_000 picoseconds. + Weight::from_parts(228_177_495, 9287) + // Standard Error: 24 + .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1004,8 +1027,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `856 + r * (45 Β±0)` // Estimated: `9271 + r * (45 Β±0)` - // Minimum execution time: 245_714_000 picoseconds. - Weight::from_parts(272_527_059, 9271) + // Minimum execution time: 248_332_000 picoseconds. + Weight::from_parts(274_998_906, 9271) + // Standard Error: 949_561 + .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) @@ -1031,10 +1056,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866` // Estimated: `9288` - // Minimum execution time: 256_060_000 picoseconds. - Weight::from_parts(276_710_811, 9288) - // Standard Error: 0 - .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + // Minimum execution time: 272_055_000 picoseconds. + Weight::from_parts(282_280_090, 9288) + // Standard Error: 1 + .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1065,10 +1090,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2902 + r * (529 Β±0)` // Estimated: `11317 + r * (5479 Β±0)` - // Minimum execution time: 270_479_000 picoseconds. - Weight::from_parts(296_706_989, 11317) - // Standard Error: 813_407 - .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + // Minimum execution time: 274_842_000 picoseconds. + Weight::from_parts(299_824_497, 11317) + // Standard Error: 902_686 + .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1098,10 +1123,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `947 + r * (10 Β±0)` // Estimated: `9363 + r * (10 Β±0)` - // Minimum execution time: 251_787_000 picoseconds. - Weight::from_parts(284_036_313, 9363) - // Standard Error: 2_560 - .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + // Minimum execution time: 255_393_000 picoseconds. 
+ Weight::from_parts(309_814_338, 9363) + // Standard Error: 2_978 + .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -1127,10 +1152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `866 + r * (10 Β±0)` // Estimated: `9283 + r * (10 Β±0)` - // Minimum execution time: 250_566_000 picoseconds. - Weight::from_parts(275_402_784, 9283) - // Standard Error: 3_939 - .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + // Minimum execution time: 250_378_000 picoseconds. + Weight::from_parts(287_003_144, 9283) + // Standard Error: 2_726 + .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -1157,12 +1182,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `883 + t * (32 Β±0)` // Estimated: `9303 + t * (2508 Β±0)` - // Minimum execution time: 267_544_000 picoseconds. - Weight::from_parts(280_117_825, 9303) - // Standard Error: 102_111 - .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + // Minimum execution time: 266_787_000 picoseconds. + Weight::from_parts(284_368_661, 9303) + // Standard Error: 121_315 + .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1190,10 +1215,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `865 + r * (7 Β±0)` // Estimated: `9285 + r * (7 Β±0)` - // Minimum execution time: 157_769_000 picoseconds. - Weight::from_parts(173_026_565, 9285) - // Standard Error: 405 - .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + // Minimum execution time: 166_804_000 picoseconds. + Weight::from_parts(175_118_291, 9285) + // Standard Error: 398 + .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) @@ -1219,10 +1244,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `125816` // Estimated: `131758` - // Minimum execution time: 403_961_000 picoseconds. - Weight::from_parts(376_480_872, 131758) + // Minimum execution time: 398_436_000 picoseconds. + Weight::from_parts(385_003_285, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1233,10 +1258,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927 + r * (292 Β±0)` // Estimated: `929 + r * (293 Β±0)` - // Minimum execution time: 256_272_000 picoseconds. 
- Weight::from_parts(181_058_379, 929) - // Standard Error: 9_838 - .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + // Minimum execution time: 274_099_000 picoseconds. + Weight::from_parts(174_282_817, 929) + // Standard Error: 10_547 + .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1250,10 +1275,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1450` // Estimated: `1433` - // Minimum execution time: 268_713_000 picoseconds. - Weight::from_parts(328_329_131, 1433) + // Minimum execution time: 274_061_000 picoseconds. + Weight::from_parts(334_755_333, 1433) // Standard Error: 66 - .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -1264,10 +1289,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1256 + n * (1 Β±0)` // Estimated: `1256 + n * (1 Β±0)` - // Minimum execution time: 265_683_000 picoseconds. - Weight::from_parts(293_784_477, 1256) - // Standard Error: 31 - .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + // Minimum execution time: 272_657_000 picoseconds. + Weight::from_parts(299_061_594, 1256) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1279,10 +1302,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (288 Β±0)` // Estimated: `930 + r * (289 Β±0)` - // Minimum execution time: 269_959_000 picoseconds. - Weight::from_parts(186_065_911, 930) - // Standard Error: 9_679 - .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + // Minimum execution time: 270_524_000 picoseconds. + Weight::from_parts(177_959_667, 930) + // Standard Error: 10_397 + .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1296,8 +1319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1252 + n * (1 Β±0)` // Estimated: `1252 + n * (1 Β±0)` - // Minimum execution time: 265_805_000 picoseconds. - Weight::from_parts(294_633_695, 1252) + // Minimum execution time: 270_757_000 picoseconds. + Weight::from_parts(298_627_896, 1252) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1309,10 +1332,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `924 + r * (296 Β±0)` // Estimated: `926 + r * (297 Β±0)` - // Minimum execution time: 264_592_000 picoseconds. - Weight::from_parts(204_670_461, 926) - // Standard Error: 6_869 - .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + // Minimum execution time: 268_082_000 picoseconds. 
+ Weight::from_parts(202_583_730, 926) + // Standard Error: 9_029 + .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1325,10 +1348,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1268 + n * (1 Β±0)` // Estimated: `1268 + n * (1 Β±0)` - // Minimum execution time: 273_165_000 picoseconds. - Weight::from_parts(297_883_669, 1268) - // Standard Error: 37 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + // Minimum execution time: 274_100_000 picoseconds. + Weight::from_parts(296_981_515, 1268) + // Standard Error: 34 + .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1340,10 +1363,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `935 + r * (288 Β±0)` // Estimated: `932 + r * (289 Β±0)` - // Minimum execution time: 252_257_000 picoseconds. - Weight::from_parts(205_018_595, 932) - // Standard Error: 7_605 - .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + // Minimum execution time: 269_697_000 picoseconds. + Weight::from_parts(198_346_516, 932) + // Standard Error: 8_623 + .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1356,10 +1379,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1255 + n * (1 Β±0)` // Estimated: `1255 + n * (1 Β±0)` - // Minimum execution time: 266_839_000 picoseconds. - Weight::from_parts(292_064_487, 1255) - // Standard Error: 34 - .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + // Minimum execution time: 264_210_000 picoseconds. + Weight::from_parts(291_387_652, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1371,10 +1394,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `917 + r * (296 Β±0)` // Estimated: `922 + r * (297 Β±0)` - // Minimum execution time: 266_072_000 picoseconds. - Weight::from_parts(189_018_352, 922) - // Standard Error: 9_530 - .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + // Minimum execution time: 267_219_000 picoseconds. + Weight::from_parts(175_866_982, 922) + // Standard Error: 11_275 + .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1388,10 +1411,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1269 + n * (1 Β±0)` // Estimated: `1269 + n * (1 Β±0)` - // Minimum execution time: 270_703_000 picoseconds. 
- Weight::from_parts(292_867_105, 1269) - // Standard Error: 30 - .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + // Minimum execution time: 274_634_000 picoseconds. + Weight::from_parts(294_272_009, 1269) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1417,10 +1440,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1418 + r * (45 Β±0)` // Estimated: `9785 + r * (2520 Β±0)` - // Minimum execution time: 251_843_000 picoseconds. - Weight::from_parts(181_026_888, 9785) - // Standard Error: 38_221 - .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + // Minimum execution time: 266_833_000 picoseconds. + Weight::from_parts(186_049_741, 9785) + // Standard Error: 40_311 + .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1448,10 +1471,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1263 + r * (245 Β±0)` // Estimated: `9635 + r * (2721 Β±0)` - // Minimum execution time: 254_881_000 picoseconds. - Weight::from_parts(272_871_000, 9635) - // Standard Error: 94_527 - .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + // Minimum execution time: 272_140_000 picoseconds. + Weight::from_parts(275_009_000, 9635) + // Standard Error: 99_187 + .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1478,11 +1501,11 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 Β±0)` - // Estimated: `9290 + r * (2637 Β±3)` - // Minimum execution time: 266_704_000 picoseconds. - Weight::from_parts(273_165_000, 9290) - // Standard Error: 141_486 - .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + // Estimated: `9290 + r * (2637 Β±10)` + // Minimum execution time: 263_664_000 picoseconds. + Weight::from_parts(277_240_000, 9290) + // Standard Error: 169_147 + .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1511,12 +1534,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1310 + t * (277 Β±0)` // Estimated: `12200 + t * (5227 Β±0)` - // Minimum execution time: 446_451_000 picoseconds. - Weight::from_parts(68_175_222, 12200) - // Standard Error: 11_619_250 - .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) + // Minimum execution time: 464_644_000 picoseconds. 
+ Weight::from_parts(40_377_313, 12200) + // Standard Error: 12_060_226 + .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -1548,10 +1571,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1281 + r * (255 Β±0)` // Estimated: `9623 + r * (2731 Β±0)` - // Minimum execution time: 628_156_000 picoseconds. - Weight::from_parts(642_052_000, 9623) - // Standard Error: 223_957 - .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + // Minimum execution time: 641_845_000 picoseconds. + Weight::from_parts(649_908_000, 9623) + // Standard Error: 278_514 + .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) @@ -1585,14 +1608,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1306 + t * (104 Β±0)` // Estimated: `12214 + t * (2549 Β±1)` - // Minimum execution time: 2_106_379_000 picoseconds. - Weight::from_parts(1_098_227_098, 12214) - // Standard Error: 12_549_729 - .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + // Minimum execution time: 2_111_632_000 picoseconds. + Weight::from_parts(1_213_125_745, 12214) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(11_u64)) @@ -1620,10 +1641,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `865 + r * (8 Β±0)` // Estimated: `9279 + r * (8 Β±0)` - // Minimum execution time: 257_102_000 picoseconds. - Weight::from_parts(278_920_692, 9279) - // Standard Error: 464 - .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + // Minimum execution time: 264_227_000 picoseconds. + Weight::from_parts(281_015_995, 9279) + // Standard Error: 710 + .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1649,10 +1670,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `873` // Estimated: `9286` - // Minimum execution time: 266_609_000 picoseconds. - Weight::from_parts(263_034_132, 9286) + // Minimum execution time: 270_709_000 picoseconds. 
+ Weight::from_parts(268_416_051, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1677,10 +1698,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9284 + r * (8 Β±0)` - // Minimum execution time: 246_275_000 picoseconds. - Weight::from_parts(279_091_594, 9284) - // Standard Error: 887 - .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + // Minimum execution time: 267_283_000 picoseconds. + Weight::from_parts(280_706_659, 9284) + // Standard Error: 540 + .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1706,10 +1727,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9292` - // Minimum execution time: 249_254_000 picoseconds. - Weight::from_parts(270_833_823, 9292) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + // Minimum execution time: 262_010_000 picoseconds. + Weight::from_parts(273_278_744, 9292) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1734,10 +1755,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9286 + r * (8 Β±0)` - // Minimum execution time: 259_131_000 picoseconds. - Weight::from_parts(272_665_020, 9286) - // Standard Error: 862 - .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + // Minimum execution time: 268_698_000 picoseconds. + Weight::from_parts(282_890_578, 9286) + // Standard Error: 622 + .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1763,10 +1784,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9291` - // Minimum execution time: 276_465_000 picoseconds. - Weight::from_parts(269_450_297, 9291) + // Minimum execution time: 252_846_000 picoseconds. + Weight::from_parts(267_657_561, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1791,10 +1812,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9283 + r * (8 Β±0)` - // Minimum execution time: 248_349_000 picoseconds. - Weight::from_parts(273_660_686, 9283) - // Standard Error: 656 - .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + // Minimum execution time: 266_899_000 picoseconds. 
+ Weight::from_parts(276_862_067, 9283) + // Standard Error: 1_249 + .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -1820,10 +1841,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9289` - // Minimum execution time: 252_107_000 picoseconds. - Weight::from_parts(267_427_726, 9289) + // Minimum execution time: 265_413_000 picoseconds. + Weight::from_parts(265_600_840, 9289) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -1848,10 +1869,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1000 + n * (1 Β±0)` // Estimated: `9412 + n * (1 Β±0)` - // Minimum execution time: 311_252_000 picoseconds. - Weight::from_parts(333_250_773, 9412) - // Standard Error: 12 - .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + // Minimum execution time: 328_341_000 picoseconds. + Weight::from_parts(341_038_581, 9412) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1875,12 +1896,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `810 + r * (112 Β±0)` + // Measured: `808 + r * (112 Β±0)` // Estimated: `9226 + r * (112 Β±0)` - // Minimum execution time: 267_431_000 picoseconds. - Weight::from_parts(300_733_048, 9226) - // Standard Error: 8_806 - .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + // Minimum execution time: 272_730_000 picoseconds. + Weight::from_parts(339_349_232, 9226) + // Standard Error: 13_004 + .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) @@ -1906,10 +1927,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `910 + r * (76 Β±0)` // Estimated: `9279 + r * (77 Β±0)` - // Minimum execution time: 265_905_000 picoseconds. - Weight::from_parts(305_381_951, 9279) - // Standard Error: 9_863 - .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + // Minimum execution time: 273_892_000 picoseconds. + Weight::from_parts(352_853_960, 9279) + // Standard Error: 16_745 + .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) @@ -1935,10 +1956,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `880 + r * (42 Β±0)` // Estimated: `9294 + r * (42 Β±0)` - // Minimum execution time: 266_701_000 picoseconds. 
- Weight::from_parts(292_407_888, 9294) - // Standard Error: 5_394 - .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + // Minimum execution time: 268_431_000 picoseconds. + Weight::from_parts(319_727_796, 9294) + // Standard Error: 10_276 + .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) @@ -1964,10 +1985,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + r * (965 Β±0)` // Estimated: `9285 + r * (3090 Β±7)` - // Minimum execution time: 268_755_000 picoseconds. - Weight::from_parts(272_300_000, 9285) - // Standard Error: 43_747 - .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + // Minimum execution time: 275_482_000 picoseconds. + Weight::from_parts(279_155_000, 9285) + // Standard Error: 65_388 + .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -1995,10 +2016,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `937 + r * (131 Β±0)` // Estimated: `9346 + r * (2607 Β±0)` - // Minimum execution time: 255_190_000 picoseconds. - Weight::from_parts(285_643_922, 9346) - // Standard Error: 23_582 - .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + // Minimum execution time: 270_001_000 picoseconds. + Weight::from_parts(286_792_689, 9346) + // Standard Error: 21_074 + .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -2026,10 +2047,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `972 + r * (184 Β±0)` // Estimated: `129453 + r * (2568 Β±0)` - // Minimum execution time: 260_061_000 picoseconds. - Weight::from_parts(286_849_343, 129453) - // Standard Error: 22_406 - .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + // Minimum execution time: 270_652_000 picoseconds. + Weight::from_parts(293_369_452, 129453) + // Standard Error: 24_321 + .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -2057,10 +2078,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `861 + r * (3 Β±0)` // Estimated: `9282 + r * (3 Β±0)` - // Minimum execution time: 250_922_000 picoseconds. - Weight::from_parts(277_294_913, 9282) - // Standard Error: 391 - .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + // Minimum execution time: 267_749_000 picoseconds. 
+ Weight::from_parts(280_373_341, 9282) + // Standard Error: 464 + .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2086,10 +2107,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2112 + r * (39 Β±0)` // Estimated: `10377 + r * (40 Β±0)` - // Minimum execution time: 267_293_000 picoseconds. - Weight::from_parts(309_125_635, 10377) - // Standard Error: 637 - .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + // Minimum execution time: 270_260_000 picoseconds. + Weight::from_parts(337_969_172, 10377) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) @@ -2117,10 +2138,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `864 + r * (3 Β±0)` // Estimated: `9279 + r * (3 Β±0)` - // Minimum execution time: 256_057_000 picoseconds. - Weight::from_parts(278_068_870, 9279) - // Standard Error: 335 - .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + // Minimum execution time: 265_607_000 picoseconds. + Weight::from_parts(283_396_630, 9279) + // Standard Error: 449 + .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2130,10 +2151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_846_000 picoseconds. - Weight::from_parts(1_808_029, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) + // Minimum execution time: 1_813_000 picoseconds. + Weight::from_parts(1_264_945, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) } } @@ -2145,8 +2166,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_103_000 picoseconds. - Weight::from_parts(2_260_000, 1627) + // Minimum execution time: 2_054_000 picoseconds. + Weight::from_parts(2_178_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -2156,10 +2177,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 Β±0)` // Estimated: `442 + k * (70 Β±0)` - // Minimum execution time: 12_173_000 picoseconds. - Weight::from_parts(12_515_000, 442) - // Standard Error: 1_033 - .saturating_add(Weight::from_parts(1_075_765, 0).saturating_mul(k.into())) + // Minimum execution time: 12_119_000 picoseconds. 
+ Weight::from_parts(12_536_000, 442) + // Standard Error: 1_254 + .saturating_add(Weight::from_parts(1_161_885, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2173,10 +2194,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 Β±0)` // Estimated: `6149 + c * (1 Β±0)` - // Minimum execution time: 8_054_000 picoseconds. - Weight::from_parts(8_503_485, 6149) + // Minimum execution time: 8_146_000 picoseconds. + Weight::from_parts(8_560_745, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_184, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_182, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2189,8 +2210,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_966_000 picoseconds. - Weight::from_parts(17_445_000, 6450) + // Minimum execution time: 16_183_000 picoseconds. + Weight::from_parts(16_825_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2203,10 +2224,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 Β±0)` // Estimated: `3635 + k * (1 Β±0)` - // Minimum execution time: 3_643_000 picoseconds. - Weight::from_parts(3_714_000, 3635) - // Standard Error: 1_056 - .saturating_add(Weight::from_parts(1_089_042, 0).saturating_mul(k.into())) + // Minimum execution time: 3_645_000 picoseconds. + Weight::from_parts(604_365, 3635) + // Standard Error: 1_013 + .saturating_add(Weight::from_parts(1_100_277, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -2227,10 +2248,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `328 + c * (1 Β±0)` // Estimated: `6266 + c * (1 Β±0)` - // Minimum execution time: 19_891_000 picoseconds. - Weight::from_parts(19_961_608, 6266) + // Minimum execution time: 19_500_000 picoseconds. + Weight::from_parts(20_074_895, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(429, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2241,8 +2262,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_483_000 picoseconds. - Weight::from_parts(13_205_000, 6380) + // Minimum execution time: 12_530_000 picoseconds. + Weight::from_parts(13_362_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2256,8 +2277,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 42_443_000 picoseconds. - Weight::from_parts(43_420_000, 6292) + // Minimum execution time: 44_275_000 picoseconds. 
+ Weight::from_parts(45_718_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2269,8 +2290,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_282_000 picoseconds. - Weight::from_parts(54_110_000, 6534) + // Minimum execution time: 55_095_000 picoseconds. + Weight::from_parts(58_009_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2280,8 +2301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_539_000 picoseconds. - Weight::from_parts(2_644_000, 1627) + // Minimum execution time: 2_455_000 picoseconds. + Weight::from_parts(2_608_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2293,8 +2314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 9_473_000 picoseconds. - Weight::from_parts(9_907_000, 3631) + // Minimum execution time: 11_207_000 picoseconds. + Weight::from_parts(11_802_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -2304,8 +2325,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 3_532_000 picoseconds. - Weight::from_parts(3_768_000, 3607) + // Minimum execution time: 4_581_000 picoseconds. + Weight::from_parts(4_781_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2316,8 +2337,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_036_000 picoseconds. - Weight::from_parts(5_208_000, 3632) + // Minimum execution time: 5_887_000 picoseconds. + Weight::from_parts(6_393_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -2328,8 +2349,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_881_000 picoseconds. - Weight::from_parts(5_149_000, 3607) + // Minimum execution time: 6_025_000 picoseconds. + Weight::from_parts(6_298_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2354,10 +2375,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804 + c * (1 Β±0)` // Estimated: `9217 + c * (1 Β±0)` - // Minimum execution time: 280_047_000 picoseconds. - Weight::from_parts(270_065_882, 9217) - // Standard Error: 86 - .saturating_add(Weight::from_parts(34_361, 0).saturating_mul(c.into())) + // Minimum execution time: 294_976_000 picoseconds. 
+ Weight::from_parts(277_235_948, 9217) + // Standard Error: 65 + .saturating_add(Weight::from_parts(34_445, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -2389,14 +2410,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_776_071_000 picoseconds. - Weight::from_parts(706_212_213, 8740) - // Standard Error: 149 - .saturating_add(Weight::from_parts(98_798, 0).saturating_mul(c.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_560, 0).saturating_mul(i.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_476, 0).saturating_mul(s.into())) + // Minimum execution time: 3_864_558_000 picoseconds. + Weight::from_parts(421_676_889, 8740) + // Standard Error: 188 + .saturating_add(Weight::from_parts(103_788, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_715, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_774, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -2426,12 +2447,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 1_966_301_000 picoseconds. - Weight::from_parts(376_287_434, 8982) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(i.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_667, 0).saturating_mul(s.into())) + // Minimum execution time: 2_069_276_000 picoseconds. + Weight::from_parts(377_210_279, 8982) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_719, 0).saturating_mul(i.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_728, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -2455,8 +2476,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 197_571_000 picoseconds. - Weight::from_parts(206_612_000, 9244) + // Minimum execution time: 199_037_000 picoseconds. + Weight::from_parts(213_931_000, 9244) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2473,14 +2494,38 @@ impl WeightInfo for () { /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. - fn upload_code(c: u32, ) -> Weight { + fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 266_006_000 picoseconds. - Weight::from_parts(287_700_788, 6085) - // Standard Error: 67 - .saturating_add(Weight::from_parts(63_196, 0).saturating_mul(c.into())) + // Minimum execution time: 278_482_000 picoseconds. 
+ Weight::from_parts(262_753_879, 6085) + // Standard Error: 112 + .saturating_add(Weight::from_parts(66_129, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) + /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `System::EventTopics` (r:1 w:1) + /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Contracts::PristineCode` (r:0 w:1) + /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// The range of component `c` is `[0, 125952]`. + fn upload_code_determinism_relaxed(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 286_902_000 picoseconds. + Weight::from_parts(269_003_959, 6085) + // Standard Error: 123 + .saturating_add(Weight::from_parts(66_629, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2498,8 +2543,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 42_714_000 picoseconds. - Weight::from_parts(44_713_000, 3780) + // Minimum execution time: 44_128_000 picoseconds. + Weight::from_parts(46_044_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -2515,8 +2560,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 33_516_000 picoseconds. - Weight::from_parts(34_823_000, 8967) + // Minimum execution time: 33_567_000 picoseconds. + Weight::from_parts(35_057_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -2541,10 +2586,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `869 + r * (6 Β±0)` // Estimated: `9284 + r * (6 Β±0)` - // Minimum execution time: 246_563_000 picoseconds. - Weight::from_parts(274_999_814, 9284) - // Standard Error: 628 - .saturating_add(Weight::from_parts(347_395, 0).saturating_mul(r.into())) + // Minimum execution time: 272_376_000 picoseconds. + Weight::from_parts(284_162_161, 9284) + // Standard Error: 501 + .saturating_add(Weight::from_parts(341_575, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2570,10 +2615,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `925 + r * (209 Β±0)` // Estimated: `9304 + r * (2684 Β±0)` - // Minimum execution time: 252_236_000 picoseconds. 
- Weight::from_parts(121_390_081, 9304) - // Standard Error: 6_206 - .saturating_add(Weight::from_parts(3_558_718, 0).saturating_mul(r.into())) + // Minimum execution time: 256_990_000 picoseconds. + Weight::from_parts(107_167_044, 9304) + // Standard Error: 7_545 + .saturating_add(Weight::from_parts(3_628_112, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -2600,10 +2645,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (213 Β±0)` // Estimated: `9308 + r * (2688 Β±0)` - // Minimum execution time: 269_427_000 picoseconds. - Weight::from_parts(134_879_389, 9308) - // Standard Error: 6_711 - .saturating_add(Weight::from_parts(4_408_658, 0).saturating_mul(r.into())) + // Minimum execution time: 272_889_000 picoseconds. + Weight::from_parts(110_341_799, 9308) + // Standard Error: 7_881 + .saturating_add(Weight::from_parts(4_427_043, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -2630,10 +2675,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `876 + r * (6 Β±0)` // Estimated: `9293 + r * (6 Β±0)` - // Minimum execution time: 249_382_000 picoseconds. - Weight::from_parts(266_305_110, 9293) - // Standard Error: 1_592 - .saturating_add(Weight::from_parts(460_001, 0).saturating_mul(r.into())) + // Minimum execution time: 275_027_000 picoseconds. + Weight::from_parts(283_302_223, 9293) + // Standard Error: 1_179 + .saturating_add(Weight::from_parts(436_023, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2659,10 +2704,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (3 Β±0)` // Estimated: `9282 + r * (3 Β±0)` - // Minimum execution time: 252_145_000 picoseconds. - Weight::from_parts(277_211_668, 9282) - // Standard Error: 371 - .saturating_add(Weight::from_parts(174_394, 0).saturating_mul(r.into())) + // Minimum execution time: 270_137_000 picoseconds. + Weight::from_parts(282_756_362, 9282) + // Standard Error: 384 + .saturating_add(Weight::from_parts(171_660, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2686,10 +2731,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `756 + r * (3 Β±0)` // Estimated: `9171 + r * (3 Β±0)` - // Minimum execution time: 251_595_000 picoseconds. - Weight::from_parts(268_102_575, 9171) - // Standard Error: 334 - .saturating_add(Weight::from_parts(154_836, 0).saturating_mul(r.into())) + // Minimum execution time: 255_131_000 picoseconds. 
+ Weight::from_parts(269_207_006, 9171) + // Standard Error: 542 + .saturating_add(Weight::from_parts(161_597, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2715,10 +2760,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `870 + r * (6 Β±0)` // Estimated: `9285 + r * (6 Β±0)` - // Minimum execution time: 251_404_000 picoseconds. - Weight::from_parts(278_747_574, 9285) - // Standard Error: 782 - .saturating_add(Weight::from_parts(341_598, 0).saturating_mul(r.into())) + // Minimum execution time: 272_172_000 picoseconds. + Weight::from_parts(283_747_134, 9285) + // Standard Error: 885 + .saturating_add(Weight::from_parts(343_968, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2744,10 +2789,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (6 Β±0)` // Estimated: `9284 + r * (6 Β±0)` - // Minimum execution time: 255_649_000 picoseconds. - Weight::from_parts(276_509_641, 9284) - // Standard Error: 1_262 - .saturating_add(Weight::from_parts(380_225, 0).saturating_mul(r.into())) + // Minimum execution time: 263_220_000 picoseconds. + Weight::from_parts(277_062_382, 9284) + // Standard Error: 1_783 + .saturating_add(Weight::from_parts(399_631, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2773,10 +2818,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1010 + r * (6 Β±0)` // Estimated: `9409 + r * (6 Β±0)` - // Minimum execution time: 267_143_000 picoseconds. - Weight::from_parts(298_166_116, 9409) - // Standard Error: 3_765 - .saturating_add(Weight::from_parts(1_620_886, 0).saturating_mul(r.into())) + // Minimum execution time: 253_218_000 picoseconds. + Weight::from_parts(282_251_452, 9409) + // Standard Error: 2_367 + .saturating_add(Weight::from_parts(1_604_060, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2802,10 +2847,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `880 + r * (6 Β±0)` // Estimated: `9301 + r * (6 Β±0)` - // Minimum execution time: 250_013_000 picoseconds. - Weight::from_parts(281_469_583, 9301) - // Standard Error: 730 - .saturating_add(Weight::from_parts(339_260, 0).saturating_mul(r.into())) + // Minimum execution time: 271_797_000 picoseconds. + Weight::from_parts(288_594_393, 9301) + // Standard Error: 846 + .saturating_add(Weight::from_parts(335_063, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2831,10 +2876,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `878 + r * (6 Β±0)` // Estimated: `9294 + r * (6 Β±0)` - // Minimum execution time: 256_219_000 picoseconds. - Weight::from_parts(275_309_266, 9294) - // Standard Error: 1_199 - .saturating_add(Weight::from_parts(350_824, 0).saturating_mul(r.into())) + // Minimum execution time: 254_997_000 picoseconds. 
+ Weight::from_parts(284_331_894, 9294) + // Standard Error: 885 + .saturating_add(Weight::from_parts(337_073, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2860,10 +2905,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875 + r * (6 Β±0)` // Estimated: `9297 + r * (6 Β±0)` - // Minimum execution time: 264_644_000 picoseconds. - Weight::from_parts(283_856_744, 9297) - // Standard Error: 623 - .saturating_add(Weight::from_parts(334_175, 0).saturating_mul(r.into())) + // Minimum execution time: 273_845_000 picoseconds. + Weight::from_parts(285_291_242, 9297) + // Standard Error: 690 + .saturating_add(Weight::from_parts(332_783, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2889,10 +2934,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (6 Β±0)` // Estimated: `9282 + r * (6 Β±0)` - // Minimum execution time: 263_364_000 picoseconds. - Weight::from_parts(281_379_508, 9282) - // Standard Error: 639 - .saturating_add(Weight::from_parts(338_021, 0).saturating_mul(r.into())) + // Minimum execution time: 268_302_000 picoseconds. + Weight::from_parts(280_463_257, 9282) + // Standard Error: 1_035 + .saturating_add(Weight::from_parts(345_811, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2920,10 +2965,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `940 + r * (14 Β±0)` // Estimated: `9350 + r * (14 Β±0)` - // Minimum execution time: 269_667_000 picoseconds. - Weight::from_parts(284_662_802, 9350) - // Standard Error: 691 - .saturating_add(Weight::from_parts(799_249, 0).saturating_mul(r.into())) + // Minimum execution time: 263_433_000 picoseconds. + Weight::from_parts(290_098_370, 9350) + // Standard Error: 1_905 + .saturating_add(Weight::from_parts(805_299, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 14).saturating_mul(r.into())) @@ -2949,10 +2994,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `868 + r * (6 Β±0)` // Estimated: `9285 + r * (6 Β±0)` - // Minimum execution time: 267_018_000 picoseconds. - Weight::from_parts(281_842_630, 9285) - // Standard Error: 453 - .saturating_add(Weight::from_parts(255_373, 0).saturating_mul(r.into())) + // Minimum execution time: 273_588_000 picoseconds. + Weight::from_parts(287_133_565, 9285) + // Standard Error: 799 + .saturating_add(Weight::from_parts(254_114, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 6).saturating_mul(r.into())) @@ -2978,10 +3023,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 258_208_000 picoseconds. - Weight::from_parts(227_686_792, 9287) - // Standard Error: 23 - .saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into())) + // Minimum execution time: 257_843_000 picoseconds. 
+ Weight::from_parts(228_177_495, 9287) + // Standard Error: 24 + .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3006,8 +3051,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `856 + r * (45 Β±0)` // Estimated: `9271 + r * (45 Β±0)` - // Minimum execution time: 245_714_000 picoseconds. - Weight::from_parts(272_527_059, 9271) + // Minimum execution time: 248_332_000 picoseconds. + Weight::from_parts(274_998_906, 9271) + // Standard Error: 949_561 + .saturating_add(Weight::from_parts(5_570_693, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 45).saturating_mul(r.into())) @@ -3033,10 +3080,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866` // Estimated: `9288` - // Minimum execution time: 256_060_000 picoseconds. - Weight::from_parts(276_710_811, 9288) - // Standard Error: 0 - .saturating_add(Weight::from_parts(312, 0).saturating_mul(n.into())) + // Minimum execution time: 272_055_000 picoseconds. + Weight::from_parts(282_280_090, 9288) + // Standard Error: 1 + .saturating_add(Weight::from_parts(330, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3067,10 +3114,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2902 + r * (529 Β±0)` // Estimated: `11317 + r * (5479 Β±0)` - // Minimum execution time: 270_479_000 picoseconds. - Weight::from_parts(296_706_989, 11317) - // Standard Error: 813_407 - .saturating_add(Weight::from_parts(109_236_910, 0).saturating_mul(r.into())) + // Minimum execution time: 274_842_000 picoseconds. + Weight::from_parts(299_824_497, 11317) + // Standard Error: 902_686 + .saturating_add(Weight::from_parts(106_562_902, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3100,10 +3147,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `947 + r * (10 Β±0)` // Estimated: `9363 + r * (10 Β±0)` - // Minimum execution time: 251_787_000 picoseconds. - Weight::from_parts(284_036_313, 9363) - // Standard Error: 2_560 - .saturating_add(Weight::from_parts(1_238_301, 0).saturating_mul(r.into())) + // Minimum execution time: 255_393_000 picoseconds. + Weight::from_parts(309_814_338, 9363) + // Standard Error: 2_978 + .saturating_add(Weight::from_parts(1_203_507, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -3129,10 +3176,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `866 + r * (10 Β±0)` // Estimated: `9283 + r * (10 Β±0)` - // Minimum execution time: 250_566_000 picoseconds. - Weight::from_parts(275_402_784, 9283) - // Standard Error: 3_939 - .saturating_add(Weight::from_parts(1_941_995, 0).saturating_mul(r.into())) + // Minimum execution time: 250_378_000 picoseconds. 
+ Weight::from_parts(287_003_144, 9283) + // Standard Error: 2_726 + .saturating_add(Weight::from_parts(1_850_967, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 10).saturating_mul(r.into())) @@ -3159,12 +3206,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `883 + t * (32 Β±0)` // Estimated: `9303 + t * (2508 Β±0)` - // Minimum execution time: 267_544_000 picoseconds. - Weight::from_parts(280_117_825, 9303) - // Standard Error: 102_111 - .saturating_add(Weight::from_parts(3_253_038, 0).saturating_mul(t.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(668, 0).saturating_mul(n.into())) + // Minimum execution time: 266_787_000 picoseconds. + Weight::from_parts(284_368_661, 9303) + // Standard Error: 121_315 + .saturating_add(Weight::from_parts(2_690_211, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(789, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3192,10 +3239,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `865 + r * (7 Β±0)` // Estimated: `9285 + r * (7 Β±0)` - // Minimum execution time: 157_769_000 picoseconds. - Weight::from_parts(173_026_565, 9285) - // Standard Error: 405 - .saturating_add(Weight::from_parts(219_727, 0).saturating_mul(r.into())) + // Minimum execution time: 166_804_000 picoseconds. + Weight::from_parts(175_118_291, 9285) + // Standard Error: 398 + .saturating_add(Weight::from_parts(220_961, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) @@ -3221,10 +3268,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `125816` // Estimated: `131758` - // Minimum execution time: 403_961_000 picoseconds. - Weight::from_parts(376_480_872, 131758) + // Minimum execution time: 398_436_000 picoseconds. + Weight::from_parts(385_003_285, 131758) // Standard Error: 12 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_038, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3235,10 +3282,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927 + r * (292 Β±0)` // Estimated: `929 + r * (293 Β±0)` - // Minimum execution time: 256_272_000 picoseconds. - Weight::from_parts(181_058_379, 929) - // Standard Error: 9_838 - .saturating_add(Weight::from_parts(6_316_677, 0).saturating_mul(r.into())) + // Minimum execution time: 274_099_000 picoseconds. + Weight::from_parts(174_282_817, 929) + // Standard Error: 10_547 + .saturating_add(Weight::from_parts(6_262_173, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3252,10 +3299,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1450` // Estimated: `1433` - // Minimum execution time: 268_713_000 picoseconds. - Weight::from_parts(328_329_131, 1433) + // Minimum execution time: 274_061_000 picoseconds. 
+ Weight::from_parts(334_755_333, 1433) // Standard Error: 66 - .saturating_add(Weight::from_parts(962, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(771, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -3266,10 +3313,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1256 + n * (1 Β±0)` // Estimated: `1256 + n * (1 Β±0)` - // Minimum execution time: 265_683_000 picoseconds. - Weight::from_parts(293_784_477, 1256) - // Standard Error: 31 - .saturating_add(Weight::from_parts(377, 0).saturating_mul(n.into())) + // Minimum execution time: 272_657_000 picoseconds. + Weight::from_parts(299_061_594, 1256) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3281,10 +3326,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (288 Β±0)` // Estimated: `930 + r * (289 Β±0)` - // Minimum execution time: 269_959_000 picoseconds. - Weight::from_parts(186_065_911, 930) - // Standard Error: 9_679 - .saturating_add(Weight::from_parts(6_286_855, 0).saturating_mul(r.into())) + // Minimum execution time: 270_524_000 picoseconds. + Weight::from_parts(177_959_667, 930) + // Standard Error: 10_397 + .saturating_add(Weight::from_parts(6_270_181, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3298,8 +3343,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1252 + n * (1 Β±0)` // Estimated: `1252 + n * (1 Β±0)` - // Minimum execution time: 265_805_000 picoseconds. - Weight::from_parts(294_633_695, 1252) + // Minimum execution time: 270_757_000 picoseconds. + Weight::from_parts(298_627_896, 1252) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3311,10 +3356,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `924 + r * (296 Β±0)` // Estimated: `926 + r * (297 Β±0)` - // Minimum execution time: 264_592_000 picoseconds. - Weight::from_parts(204_670_461, 926) - // Standard Error: 6_869 - .saturating_add(Weight::from_parts(5_137_920, 0).saturating_mul(r.into())) + // Minimum execution time: 268_082_000 picoseconds. + Weight::from_parts(202_583_730, 926) + // Standard Error: 9_029 + .saturating_add(Weight::from_parts(5_028_517, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3327,10 +3372,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1268 + n * (1 Β±0)` // Estimated: `1268 + n * (1 Β±0)` - // Minimum execution time: 273_165_000 picoseconds. - Weight::from_parts(297_883_669, 1268) - // Standard Error: 37 - .saturating_add(Weight::from_parts(576, 0).saturating_mul(n.into())) + // Minimum execution time: 274_100_000 picoseconds. 
+ Weight::from_parts(296_981_515, 1268) + // Standard Error: 34 + .saturating_add(Weight::from_parts(578, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3342,10 +3387,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `935 + r * (288 Β±0)` // Estimated: `932 + r * (289 Β±0)` - // Minimum execution time: 252_257_000 picoseconds. - Weight::from_parts(205_018_595, 932) - // Standard Error: 7_605 - .saturating_add(Weight::from_parts(4_933_378, 0).saturating_mul(r.into())) + // Minimum execution time: 269_697_000 picoseconds. + Weight::from_parts(198_346_516, 932) + // Standard Error: 8_623 + .saturating_add(Weight::from_parts(4_964_116, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3358,10 +3403,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1255 + n * (1 Β±0)` // Estimated: `1255 + n * (1 Β±0)` - // Minimum execution time: 266_839_000 picoseconds. - Weight::from_parts(292_064_487, 1255) - // Standard Error: 34 - .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) + // Minimum execution time: 264_210_000 picoseconds. + Weight::from_parts(291_387_652, 1255) + // Standard Error: 36 + .saturating_add(Weight::from_parts(524, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3373,10 +3418,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `917 + r * (296 Β±0)` // Estimated: `922 + r * (297 Β±0)` - // Minimum execution time: 266_072_000 picoseconds. - Weight::from_parts(189_018_352, 922) - // Standard Error: 9_530 - .saturating_add(Weight::from_parts(6_481_011, 0).saturating_mul(r.into())) + // Minimum execution time: 267_219_000 picoseconds. + Weight::from_parts(175_866_982, 922) + // Standard Error: 11_275 + .saturating_add(Weight::from_parts(6_424_755, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3390,10 +3435,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1269 + n * (1 Β±0)` // Estimated: `1269 + n * (1 Β±0)` - // Minimum execution time: 270_703_000 picoseconds. - Weight::from_parts(292_867_105, 1269) - // Standard Error: 30 - .saturating_add(Weight::from_parts(800, 0).saturating_mul(n.into())) + // Minimum execution time: 274_634_000 picoseconds. + Weight::from_parts(294_272_009, 1269) + // Standard Error: 36 + .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3419,10 +3464,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1418 + r * (45 Β±0)` // Estimated: `9785 + r * (2520 Β±0)` - // Minimum execution time: 251_843_000 picoseconds. 
- Weight::from_parts(181_026_888, 9785) - // Standard Error: 38_221 - .saturating_add(Weight::from_parts(30_626_681, 0).saturating_mul(r.into())) + // Minimum execution time: 266_833_000 picoseconds. + Weight::from_parts(186_049_741, 9785) + // Standard Error: 40_311 + .saturating_add(Weight::from_parts(31_365_585, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -3450,10 +3495,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1263 + r * (245 Β±0)` // Estimated: `9635 + r * (2721 Β±0)` - // Minimum execution time: 254_881_000 picoseconds. - Weight::from_parts(272_871_000, 9635) - // Standard Error: 94_527 - .saturating_add(Weight::from_parts(233_379_497, 0).saturating_mul(r.into())) + // Minimum execution time: 272_140_000 picoseconds. + Weight::from_parts(275_009_000, 9635) + // Standard Error: 99_187 + .saturating_add(Weight::from_parts(240_702_479, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -3480,11 +3525,11 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (576 Β±0)` - // Estimated: `9290 + r * (2637 Β±3)` - // Minimum execution time: 266_704_000 picoseconds. - Weight::from_parts(273_165_000, 9290) - // Standard Error: 141_486 - .saturating_add(Weight::from_parts(232_947_049, 0).saturating_mul(r.into())) + // Estimated: `9290 + r * (2637 Β±10)` + // Minimum execution time: 263_664_000 picoseconds. + Weight::from_parts(277_240_000, 9290) + // Standard Error: 169_147 + .saturating_add(Weight::from_parts(240_084_929, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3513,12 +3558,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1310 + t * (277 Β±0)` // Estimated: `12200 + t * (5227 Β±0)` - // Minimum execution time: 446_451_000 picoseconds. - Weight::from_parts(68_175_222, 12200) - // Standard Error: 11_619_250 - .saturating_add(Weight::from_parts(354_237_260, 0).saturating_mul(t.into())) + // Minimum execution time: 464_644_000 picoseconds. + Weight::from_parts(40_377_313, 12200) + // Standard Error: 12_060_226 + .saturating_add(Weight::from_parts(380_635_982, 0).saturating_mul(t.into())) // Standard Error: 17 - .saturating_add(Weight::from_parts(987, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_014, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -3550,10 +3595,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1281 + r * (255 Β±0)` // Estimated: `9623 + r * (2731 Β±0)` - // Minimum execution time: 628_156_000 picoseconds. - Weight::from_parts(642_052_000, 9623) - // Standard Error: 223_957 - .saturating_add(Weight::from_parts(348_660_264, 0).saturating_mul(r.into())) + // Minimum execution time: 641_845_000 picoseconds. 
+ Weight::from_parts(649_908_000, 9623) + // Standard Error: 278_514 + .saturating_add(Weight::from_parts(361_164_598, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) @@ -3587,14 +3632,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1306 + t * (104 Β±0)` // Estimated: `12214 + t * (2549 Β±1)` - // Minimum execution time: 2_106_379_000 picoseconds. - Weight::from_parts(1_098_227_098, 12214) - // Standard Error: 12_549_729 - .saturating_add(Weight::from_parts(21_628_382, 0).saturating_mul(t.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_059, 0).saturating_mul(i.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(s.into())) + // Minimum execution time: 2_111_632_000 picoseconds. + Weight::from_parts(1_213_125_745, 12214) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_055, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_192, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(11_u64)) @@ -3622,10 +3665,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `865 + r * (8 Β±0)` // Estimated: `9279 + r * (8 Β±0)` - // Minimum execution time: 257_102_000 picoseconds. - Weight::from_parts(278_920_692, 9279) - // Standard Error: 464 - .saturating_add(Weight::from_parts(374_120, 0).saturating_mul(r.into())) + // Minimum execution time: 264_227_000 picoseconds. + Weight::from_parts(281_015_995, 9279) + // Standard Error: 710 + .saturating_add(Weight::from_parts(365_734, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3651,10 +3694,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `873` // Estimated: `9286` - // Minimum execution time: 266_609_000 picoseconds. - Weight::from_parts(263_034_132, 9286) + // Minimum execution time: 270_709_000 picoseconds. + Weight::from_parts(268_416_051, 9286) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_082, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_080, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3679,10 +3722,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9284 + r * (8 Β±0)` - // Minimum execution time: 246_275_000 picoseconds. - Weight::from_parts(279_091_594, 9284) - // Standard Error: 887 - .saturating_add(Weight::from_parts(810_394, 0).saturating_mul(r.into())) + // Minimum execution time: 267_283_000 picoseconds. + Weight::from_parts(280_706_659, 9284) + // Standard Error: 540 + .saturating_add(Weight::from_parts(790_312, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3708,10 +3751,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9292` - // Minimum execution time: 249_254_000 picoseconds. 
- Weight::from_parts(270_833_823, 9292) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_347, 0).saturating_mul(n.into())) + // Minimum execution time: 262_010_000 picoseconds. + Weight::from_parts(273_278_744, 9292) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_362, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3736,10 +3779,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9286 + r * (8 Β±0)` - // Minimum execution time: 259_131_000 picoseconds. - Weight::from_parts(272_665_020, 9286) - // Standard Error: 862 - .saturating_add(Weight::from_parts(451_247, 0).saturating_mul(r.into())) + // Minimum execution time: 268_698_000 picoseconds. + Weight::from_parts(282_890_578, 9286) + // Standard Error: 622 + .saturating_add(Weight::from_parts(441_131, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3765,10 +3808,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9291` - // Minimum execution time: 276_465_000 picoseconds. - Weight::from_parts(269_450_297, 9291) + // Minimum execution time: 252_846_000 picoseconds. + Weight::from_parts(267_657_561, 9291) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_208, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3793,10 +3836,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `867 + r * (8 Β±0)` // Estimated: `9283 + r * (8 Β±0)` - // Minimum execution time: 248_349_000 picoseconds. - Weight::from_parts(273_660_686, 9283) - // Standard Error: 656 - .saturating_add(Weight::from_parts(444_293, 0).saturating_mul(r.into())) + // Minimum execution time: 266_899_000 picoseconds. + Weight::from_parts(276_862_067, 9283) + // Standard Error: 1_249 + .saturating_add(Weight::from_parts(443_970, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) @@ -3822,10 +3865,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `875` // Estimated: `9289` - // Minimum execution time: 252_107_000 picoseconds. - Weight::from_parts(267_427_726, 9289) + // Minimum execution time: 265_413_000 picoseconds. + Weight::from_parts(265_600_840, 9289) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_203, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -3850,10 +3893,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1000 + n * (1 Β±0)` // Estimated: `9412 + n * (1 Β±0)` - // Minimum execution time: 311_252_000 picoseconds. - Weight::from_parts(333_250_773, 9412) - // Standard Error: 12 - .saturating_add(Weight::from_parts(5_668, 0).saturating_mul(n.into())) + // Minimum execution time: 328_341_000 picoseconds. 
+ Weight::from_parts(341_038_581, 9412) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_863, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -3877,12 +3920,12 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `810 + r * (112 Β±0)` + // Measured: `808 + r * (112 Β±0)` // Estimated: `9226 + r * (112 Β±0)` - // Minimum execution time: 267_431_000 picoseconds. - Weight::from_parts(300_733_048, 9226) - // Standard Error: 8_806 - .saturating_add(Weight::from_parts(42_169_197, 0).saturating_mul(r.into())) + // Minimum execution time: 272_730_000 picoseconds. + Weight::from_parts(339_349_232, 9226) + // Standard Error: 13_004 + .saturating_add(Weight::from_parts(41_649_026, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(r.into())) @@ -3908,10 +3951,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `910 + r * (76 Β±0)` // Estimated: `9279 + r * (77 Β±0)` - // Minimum execution time: 265_905_000 picoseconds. - Weight::from_parts(305_381_951, 9279) - // Standard Error: 9_863 - .saturating_add(Weight::from_parts(45_771_182, 0).saturating_mul(r.into())) + // Minimum execution time: 273_892_000 picoseconds. + Weight::from_parts(352_853_960, 9279) + // Standard Error: 16_745 + .saturating_add(Weight::from_parts(45_853_294, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 77).saturating_mul(r.into())) @@ -3937,10 +3980,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `880 + r * (42 Β±0)` // Estimated: `9294 + r * (42 Β±0)` - // Minimum execution time: 266_701_000 picoseconds. - Weight::from_parts(292_407_888, 9294) - // Standard Error: 5_394 - .saturating_add(Weight::from_parts(11_896_065, 0).saturating_mul(r.into())) + // Minimum execution time: 268_431_000 picoseconds. + Weight::from_parts(319_727_796, 9294) + // Standard Error: 10_276 + .saturating_add(Weight::from_parts(11_928_882, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(r.into())) @@ -3966,10 +4009,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + r * (965 Β±0)` // Estimated: `9285 + r * (3090 Β±7)` - // Minimum execution time: 268_755_000 picoseconds. - Weight::from_parts(272_300_000, 9285) - // Standard Error: 43_747 - .saturating_add(Weight::from_parts(25_001_889, 0).saturating_mul(r.into())) + // Minimum execution time: 275_482_000 picoseconds. + Weight::from_parts(279_155_000, 9285) + // Standard Error: 65_388 + .saturating_add(Weight::from_parts(26_634_187, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -3997,10 +4040,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `937 + r * (131 Β±0)` // Estimated: `9346 + r * (2607 Β±0)` - // Minimum execution time: 255_190_000 picoseconds. 
- Weight::from_parts(285_643_922, 9346) - // Standard Error: 23_582 - .saturating_add(Weight::from_parts(6_289_940, 0).saturating_mul(r.into())) + // Minimum execution time: 270_001_000 picoseconds. + Weight::from_parts(286_792_689, 9346) + // Standard Error: 21_074 + .saturating_add(Weight::from_parts(6_465_885, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -4028,10 +4071,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `972 + r * (184 Β±0)` // Estimated: `129453 + r * (2568 Β±0)` - // Minimum execution time: 260_061_000 picoseconds. - Weight::from_parts(286_849_343, 129453) - // Standard Error: 22_406 - .saturating_add(Weight::from_parts(5_661_459, 0).saturating_mul(r.into())) + // Minimum execution time: 270_652_000 picoseconds. + Weight::from_parts(293_369_452, 129453) + // Standard Error: 24_321 + .saturating_add(Weight::from_parts(5_575_600, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -4059,10 +4102,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `861 + r * (3 Β±0)` // Estimated: `9282 + r * (3 Β±0)` - // Minimum execution time: 250_922_000 picoseconds. - Weight::from_parts(277_294_913, 9282) - // Standard Error: 391 - .saturating_add(Weight::from_parts(169_457, 0).saturating_mul(r.into())) + // Minimum execution time: 267_749_000 picoseconds. + Weight::from_parts(280_373_341, 9282) + // Standard Error: 464 + .saturating_add(Weight::from_parts(170_398, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -4088,10 +4131,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2112 + r * (39 Β±0)` // Estimated: `10377 + r * (40 Β±0)` - // Minimum execution time: 267_293_000 picoseconds. - Weight::from_parts(309_125_635, 10377) - // Standard Error: 637 - .saturating_add(Weight::from_parts(245_508, 0).saturating_mul(r.into())) + // Minimum execution time: 270_260_000 picoseconds. + Weight::from_parts(337_969_172, 10377) + // Standard Error: 1_557 + .saturating_add(Weight::from_parts(305_735, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 40).saturating_mul(r.into())) @@ -4119,10 +4162,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `864 + r * (3 Β±0)` // Estimated: `9279 + r * (3 Β±0)` - // Minimum execution time: 256_057_000 picoseconds. - Weight::from_parts(278_068_870, 9279) - // Standard Error: 335 - .saturating_add(Weight::from_parts(152_083, 0).saturating_mul(r.into())) + // Minimum execution time: 265_607_000 picoseconds. 
+ Weight::from_parts(283_396_630, 9279) + // Standard Error: 449 + .saturating_add(Weight::from_parts(149_018, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -4132,9 +4175,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_846_000 picoseconds. - Weight::from_parts(1_808_029, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(14_937, 0).saturating_mul(r.into())) + // Minimum execution time: 1_813_000 picoseconds. + Weight::from_parts(1_264_945, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(15_098, 0).saturating_mul(r.into())) } } -- GitLab From 117a9433dac88d5ac00c058c9b39c511d47749d2 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Wed, 6 Mar 2024 22:44:03 +0100 Subject: [PATCH 53/86] Make `penpal-runtime`'s `TrustedReserves` more connfigurable (#3564) The current `penpal` runtime utilizes the `EthereumLocation` parameter, which is employed for XCM emulated integration tests concerning the Rococo <> ETH bridge. It includes a hard-coded chainId for the Ethereum testnet utilized in Rococo. The `EthereumLocation` serves the purpose of aligning with the `TrustedReserves`. However, due to this hard-coded configuration, reusing `penpal` for testing various environments such as Kusama/Polkadot versus Ethereum bridge becomes unfeasible. This PR introduces the capability to easily customize the asset location for `TrustedReserves` without needing to know anything about Ethereum. ## TODO - [x] fix integration tests with `System::set_storage(CustomizableAssetFromSystemAssetHub::key(), )` @claravanstaden - [ ] ~~maybe add some helper function/macro to support `set_storage` for other runtimes (that we could reuse)~~ - [ ] Release patch for: `penpal-runtime` + emulated crate with `set_storage` support (if needed) - [ ] backport to 1.7.0 - [ ] backport to 1.8.0 --------- Co-authored-by: Clara van Staden --- Cargo.lock | 1 - .../chains/parachains/testing/penpal/src/lib.rs | 3 ++- .../bridge-hub-rococo/src/tests/snowbridge.rs | 11 +++++++++++ .../parachains/runtimes/testing/penpal/Cargo.toml | 2 -- .../runtimes/testing/penpal/src/xcm_config.rs | 15 ++++++++++----- 5 files changed, 23 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26e134448ff..fbd347c5fde 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11754,7 +11754,6 @@ dependencies = [ "staging-xcm-builder", "staging-xcm-executor", "substrate-wasm-builder", - "testnet-parachains-constants", ] [[package]] diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 8f586a46a75..c1e03046655 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -16,7 +16,8 @@ mod genesis; pub use genesis::{genesis, ED, PARA_ID_A, PARA_ID_B}; pub use penpal_runtime::xcm_config::{ - LocalTeleportableToAssetHub, LocalTeleportableToAssetHubV3, XcmConfig, + CustomizableAssetFromSystemAssetHub, LocalTeleportableToAssetHub, + LocalTeleportableToAssetHubV3, XcmConfig, }; // Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index a5957ee3393..ff069e2d344 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -18,6 +18,7 @@ use codec::{Decode, Encode}; use emulated_integration_tests_common::xcm_emulator::ConvertLocation; use frame_support::pallet_prelude::TypeInfo; use hex_literal::hex; +use rococo_system_emulated_network::penpal_emulated_chain::CustomizableAssetFromSystemAssetHub; use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender; use snowbridge_core::outbound::OperatingMode; use snowbridge_pallet_inbound_queue_fixtures::{ @@ -253,6 +254,16 @@ fn send_token_from_ethereum_to_penpal() { (PenpalASender::get(), INITIAL_FUND), ]); + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + CustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(), + )], + )); + }); + // The Weth asset location, identified by the contract address on Ethereum let weth_asset_location: Location = (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index bca6aca051f..5a8b15740e4 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -77,7 +77,6 @@ cumulus-primitives-utility = { path = "../../../../primitives/utility", default- pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } parachains-common = { path = "../../../common", default-features = false } -testnet-parachains-constants = { path = "../../constants", default-features = false, features = ["rococo"] } assets-common = { path = "../../assets/common", default-features = false } [features] @@ -133,7 +132,6 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", - "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index ef4f466d484..84b8fd9bb0a 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -43,7 +43,6 @@ use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; use sp_runtime::traits::Zero; -use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -295,7 +294,13 @@ parameter_types! { 0, [xcm::v3::Junction::PalletInstance(50), xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into())] ); - pub EthereumLocation: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]); + + /// The Penpal runtime is utilized for testing with various environment setups. 
+ /// This storage item provides the opportunity to customize testing scenarios + /// by configuring the trusted asset from the `SystemAssetHub`. + /// + /// By default, it is configured as a `SystemAssetHubLocation` and can be modified using `System::set_storage`. + pub storage CustomizableAssetFromSystemAssetHub: Location = SystemAssetHubLocation::get(); } /// Accepts asset with ID `AssetLocation` and is coming from `Origin` chain. @@ -310,11 +315,11 @@ impl, Origin: Get> ContainsPair, NativeAssetFrom, - AssetPrefixFrom, + AssetPrefixFrom, ); pub type TrustedTeleporters = (AssetFromChain,); @@ -326,7 +331,7 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = Reserves; + type IsReserve = TrustedReserves; // no teleport trust established with other chains type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; -- GitLab From 4ae7398818c4c49bec9b4185943cf01cbfb00144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 7 Mar 2024 09:05:04 +0800 Subject: [PATCH 54/86] contracts: Remove wat support from test fixtures (#3588) In order to prepare for PolkaVM support I removed the wat support from our test fixture crate. - Removed redundant tests (invalid module checks are already inside the prepare module where they belong - Converted the gas_sync tests to Rust - Moved the start function test to the `wasm` module --- Cargo.lock | 1 - substrate/frame/contracts/fixtures/Cargo.toml | 1 - ...tract_no_call.rs => caller_is_origin_n.rs} | 17 +- .../data/invalid_contract_no_memory.wat | 5 - .../fixtures/data/invalid_module.wat | 8 - .../fixtures/data/run_out_of_gas_start_fn.wat | 10 -- .../fixtures/data/seal_input_noop.wat | 14 -- .../fixtures/data/seal_input_once.wat | 22 --- .../fixtures/data/seal_input_twice.wat | 28 --- substrate/frame/contracts/fixtures/src/lib.rs | 41 +---- substrate/frame/contracts/src/tests.rs | 167 ++---------------- substrate/frame/contracts/src/wasm/mod.rs | 18 ++ 12 files changed, 49 insertions(+), 283 deletions(-) rename substrate/frame/contracts/fixtures/contracts/{invalid_contract_no_call.rs => caller_is_origin_n.rs} (74%) delete mode 100644 substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat delete mode 100644 substrate/frame/contracts/fixtures/data/invalid_module.wat delete mode 100644 substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat delete mode 100644 substrate/frame/contracts/fixtures/data/seal_input_noop.wat delete mode 100644 substrate/frame/contracts/fixtures/data/seal_input_once.wat delete mode 100644 substrate/frame/contracts/fixtures/data/seal_input_twice.wat diff --git a/Cargo.lock b/Cargo.lock index fbd347c5fde..177d76b8706 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9634,7 +9634,6 @@ dependencies = [ "tempfile", "toml 0.8.8", "twox-hash", - "wat", ] [[package]] diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 7fdf56a91fc..9ca9e404c31 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,7 +11,6 @@ description = "Fixtures for testing contracts pallet." 
workspace = true [dependencies] -wat = "1" frame-system = { path = "../../system" } sp-runtime = { path = "../../../primitives/runtime" } anyhow = "1.0.0" diff --git a/substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs b/substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs similarity index 74% rename from substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs rename to substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs index 13af3eb22b1..fd6f59802fa 100644 --- a/substrate/frame/contracts/fixtures/contracts/invalid_contract_no_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/caller_is_origin_n.rs @@ -14,12 +14,25 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -//! Valid module but missing the call function + +//! This fixture calls caller_is_origin `n` times. + #![no_std] #![no_main] -extern crate common; +use common::input; +use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(n: u32, ); + + for _ in 0..n { + let _ = api::caller_is_origin(); + } +} diff --git a/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat deleted file mode 100644 index 0aeefbcb7eb..00000000000 --- a/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat +++ /dev/null @@ -1,5 +0,0 @@ -;; A valid contract which does nothing at all -(module - (func (export "deploy")) - (func (export "call")) -) diff --git a/substrate/frame/contracts/fixtures/data/invalid_module.wat b/substrate/frame/contracts/fixtures/data/invalid_module.wat deleted file mode 100644 index e4a72f74273..00000000000 --- a/substrate/frame/contracts/fixtures/data/invalid_module.wat +++ /dev/null @@ -1,8 +0,0 @@ -;; An invalid module -(module - (func (export "deploy")) - (func (export "call") - ;; imbalanced stack - (i32.const 7) - ) -) diff --git a/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat deleted file mode 100644 index 6591d7ede78..00000000000 --- a/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat +++ /dev/null @@ -1,10 +0,0 @@ -(module - (import "env" "memory" (memory 1 1)) - (start $start) - (func $start - (loop $inf (br $inf)) ;; just run out of gas - (unreachable) - ) - (func (export "call")) - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_noop.wat b/substrate/frame/contracts/fixtures/data/seal_input_noop.wat deleted file mode 100644 index 7b5a1e32af4..00000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_noop.wat +++ /dev/null @@ -1,14 +0,0 @@ -;; Everything prepared for the host function call, but no call is performed. 
-(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call")) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_once.wat b/substrate/frame/contracts/fixtures/data/seal_input_once.wat deleted file mode 100644 index 919a03a9b69..00000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_once.wat +++ /dev/null @@ -1,22 +0,0 @@ -;; Stores a value of the passed size. The host function is called once. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call") - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - - ) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/data/seal_input_twice.wat b/substrate/frame/contracts/fixtures/data/seal_input_twice.wat deleted file mode 100644 index 3a8be814efb..00000000000 --- a/substrate/frame/contracts/fixtures/data/seal_input_twice.wat +++ /dev/null @@ -1,28 +0,0 @@ -;; Stores a value of the passed size. The host function is called twice. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) buffer to write input - - ;; [8, 12) size of the input buffer - (data (i32.const 8) "\04") - - (func (export "call") - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - - ;; instructions to consume engine fuel - (drop - (i32.const 42) - ) - - (call $seal_input (i32.const 0) (i32.const 8)) - ) - - (func (export "deploy")) -) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index e0d9d4f8bd5..56a8e2321c5 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -16,34 +16,7 @@ // limitations under the License. use sp_runtime::traits::Hash; -use std::{env::var, fs, path::PathBuf}; - -fn wat_root_dir() -> PathBuf { - match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) { - // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder - (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), - (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), - (Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" => - PathBuf::from(path).parent().unwrap().join("fixtures/data"), - (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), - } -} - -/// Load a given wasm module represented by a .wat file and returns a wasm binary contents along -/// with it's hash. -/// -/// The fixture files are located under the `fixtures/` directory. -fn legacy_compile_module( - fixture_name: &str, -) -> anyhow::Result<(Vec, ::Output)> -where - T: frame_system::Config, -{ - let fixture_path = wat_root_dir().join(format!("{fixture_name}.wat")); - let wasm_binary = wat::parse_file(fixture_path)?; - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) -} +use std::{fs, path::PathBuf}; /// Load a given wasm module and returns a wasm binary contents along with it's hash. 
/// Use the legacy compile_module as fallback, if the rust fixture does not exist yet. @@ -53,15 +26,11 @@ pub fn compile_module( where T: frame_system::Config, { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: PathBuf = env!("OUT_DIR").into(); let fixture_path = out_dir.join(format!("{fixture_name}.wasm")); - match fs::read(fixture_path) { - Ok(wasm_binary) => { - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) - }, - Err(_) => legacy_compile_module::(fixture_name), - } + let binary = fs::read(fixture_path)?; + let code_hash = T::Hashing::hash(&binary); + Ok((binary, code_hash)) } #[cfg(test)] diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 011b2b25b2b..4c1f65e28d5 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -852,27 +852,6 @@ fn deposit_event_max_value_limit() { }); } -// Fail out of fuel (ref_time weight) inside the start function. -#[test] -fn run_out_of_fuel_start_fun() { - let (wasm, _code_hash) = compile_module::("run_out_of_gas_start_fn").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - RuntimeOrigin::signed(ALICE), - 0, - Weight::from_parts(1_000_000_000_000, u64::MAX), - None, - wasm, - vec![], - vec![], - ), - Error::::OutOfGas, - ); - }); -} - // Fail out of fuel (ref_time weight) in the engine. #[test] fn run_out_of_fuel_engine() { @@ -956,50 +935,15 @@ fn run_out_of_fuel_host() { #[test] fn gas_syncs_work() { - let (wasm0, _code_hash) = compile_module::("seal_input_noop").unwrap(); - let (wasm1, _code_hash) = compile_module::("seal_input_once").unwrap(); - let (wasm2, _code_hash) = compile_module::("seal_input_twice").unwrap(); + let (code, _code_hash) = compile_module::("caller_is_origin_n").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate noop contract. - let addr0 = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm0), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - - // Instantiate 1st contract. - let addr1 = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm1), - vec![], - vec![], - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id; - - // Instantiate 2nd contract. 
- let addr2 = Contracts::bare_instantiate( + let addr = Contracts::bare_instantiate( ALICE, 0, GAS_LIMIT, None, - Code::Upload(wasm2), + Code::Upload(code), vec![], vec![], DebugInfo::Skip, @@ -1011,11 +955,11 @@ fn gas_syncs_work() { let result = Contracts::bare_call( ALICE, - addr0, + addr.clone(), 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 0u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -1025,27 +969,28 @@ fn gas_syncs_work() { let result = Contracts::bare_call( ALICE, - addr1, + addr.clone(), 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 1u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, ); assert_ok!(result.result); let gas_consumed_once = result.gas_consumed.ref_time(); - let host_consumed_once = ::Schedule::get().host_fn_weights.input.ref_time(); + let host_consumed_once = + ::Schedule::get().host_fn_weights.caller_is_origin.ref_time(); let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; let result = Contracts::bare_call( ALICE, - addr2, + addr, 0, GAS_LIMIT, None, - 1u8.to_le_bytes().to_vec(), + 2u32.encode(), DebugInfo::Skip, CollectEvents::Skip, Determinism::Enforced, @@ -4453,96 +4398,6 @@ fn contract_reverted() { }); } -#[test] -fn code_rejected_error_works() { - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - let (wasm, _) = compile_module::("invalid_module").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "Can't load the module into wasmi!" 
- ); - - let (wasm, _) = compile_module::("invalid_contract_no_call").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "call function isn't exported" - ); - - let (wasm, _) = compile_module::("invalid_contract_no_memory").unwrap(); - assert_noop!( - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - None, - Determinism::Enforced - ), - >::CodeRejected, - ); - - let result = Contracts::bare_instantiate( - ALICE, - 0, - GAS_LIMIT, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ); - assert_err!(result.result, >::CodeRejected); - assert_eq!( - std::str::from_utf8(&result.debug_message).unwrap(), - "No memory import found in the module" - ); - }); -} - #[test] fn set_code_hash() { let (wasm, code_hash) = compile_module::("set_code_hash").unwrap(); diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 87e92cd72e8..23363780b14 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -3442,4 +3442,22 @@ mod tests { runtime.read_sandbox_memory_as(&memory, 0u32).unwrap(); assert_eq!(decoded.into_inner(), data); } + + #[test] + fn run_out_of_gas_in_start_fn() { + const CODE: &str = r#" +(module + (import "env" "memory" (memory 1 1)) + (start $start) + (func $start + (loop $inf (br $inf)) ;; just run out of gas + (unreachable) + ) + (func (export "call")) + (func (export "deploy")) +) +"#; + let mut mock_ext = MockExt::default(); + assert_err!(execute(&CODE, vec![], &mut mock_ext), >::OutOfGas); + } } -- GitLab From c16fcc47bc427dd01ed3d1c17d6483bd388bc92f Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Thu, 7 Mar 2024 11:11:43 +0200 Subject: [PATCH 55/86] ParaInherent create: update `apply_weight_limit` wrt elastic scaling (#3573) Changes the way we perform the random selection of backed candidates when there isn't enough room for all of them. Instead of picking individual backed candidates `apply_weight` now operates on chains of candidates. This is fully backwards compatible and relies on the node side (provisioner/prospective parachains) doing the heavy lifting and providing the candidates in the order they form a chain. The same approach can be implemented for bitfields random selection once https://github.com/paritytech/polkadot-sdk/pull/3479 is merged. The approach taken in this PR aims for reduced additional complexity at the cost of being less fair wrt how many backed candidates from each chain are picked. It favors elastic scaling parachains vs parachains not using elastic scaling, but from my perspective it should be fine as this should happen under exceptional circumstances like dispute storms. Note: to make things more fair we can consider specializing `random_sel` such that it will try to pick candidates one by one in the order provided by the provisioner such that non elastic scaling parachains have the same chance of getting a candidate backed. 
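
A minimal, self-contained sketch of the chain-based selection idea described above (illustrative only: the `Candidate` struct, helper names and the plain in-order pass are simplified stand-ins; the actual `apply_weight_limit` in the diff below operates on `BackedCandidate`s, picks chains via `random_sel`, and prefers chains containing a code upgrade):

```rust
// Simplified stand-in for a backed candidate: just its para id and weight.
#[derive(Debug, Clone)]
struct Candidate {
    para_id: u32,
    weight: u64,
}

/// Group candidates into per-para chains. Assumes the provisioner invariant:
/// candidates of the same para that form a chain arrive adjacent to each other.
fn group_into_chains(candidates: Vec<Candidate>) -> Vec<Vec<Candidate>> {
    let mut chains: Vec<Vec<Candidate>> = Vec::new();
    let mut current_para = None;
    for c in candidates {
        if Some(c.para_id) == current_para {
            chains
                .last_mut()
                .expect("current_para is Some, so chains is non-empty")
                .push(c);
        } else {
            current_para = Some(c.para_id);
            chains.push(vec![c]);
        }
    }
    chains
}

/// Keep whole chains while they fit into `max_weight`; a chain that does not
/// fit is dropped entirely (the simplification mentioned above). A plain
/// in-order pass is used here instead of random selection to keep the sketch small.
fn select_chains(chains: Vec<Vec<Candidate>>, max_weight: u64) -> Vec<Candidate> {
    let mut consumed = 0u64;
    let mut picked = Vec::new();
    for chain in chains {
        let chain_weight: u64 = chain.iter().map(|c| c.weight).sum();
        if consumed + chain_weight <= max_weight {
            consumed += chain_weight;
            picked.extend(chain);
        }
    }
    picked
}

fn main() {
    let candidates = vec![
        Candidate { para_id: 1000, weight: 30 }, // chain of 3 for para 1000
        Candidate { para_id: 1000, weight: 30 },
        Candidate { para_id: 1000, weight: 30 },
        Candidate { para_id: 2000, weight: 40 }, // single-candidate "chain"
    ];
    let chains = group_into_chains(candidates);
    // With a budget of 100, the whole para-1000 chain (90) fits; para-2000 no longer does.
    let picked = select_chains(chains, 100);
    assert_eq!(picked.len(), 3);
    assert!(picked.iter().all(|c| c.para_id == 1000));
    println!("picked {} candidates", picked.len());
}
```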
--------- Signed-off-by: Andrei Sandu --- .../parachains/src/paras_inherent/mod.rs | 64 +++++++-- .../parachains/src/paras_inherent/tests.rs | 125 +++++++++++++++++- 2 files changed, 174 insertions(+), 15 deletions(-) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index cebf858c24a..723a15bdba7 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -788,7 +788,7 @@ fn random_sel Weight>( /// Assumes disputes are already filtered by the time this is called. /// /// Returns the total weight consumed by `bitfields` and `candidates`. -fn apply_weight_limit( +pub(crate) fn apply_weight_limit( candidates: &mut Vec::Hash>>, bitfields: &mut UncheckedSignedAvailabilityBitfields, max_consumable_weight: Weight, @@ -805,35 +805,71 @@ fn apply_weight_limit( return total } - // Prefer code upgrades, they tend to be large and hence stand no chance to be picked - // late while maintaining the weight bounds. - let preferred_indices = candidates + // Invariant: block author provides candidate in the order in which they form a chain + // wrt elastic scaling. If the invariant is broken, we'd fail later when filtering candidates + // which are unchained. + + let mut chained_candidates: Vec> = Vec::new(); + let mut current_para_id = None; + + for candidate in sp_std::mem::take(candidates).into_iter() { + let candidate_para_id = candidate.descriptor().para_id; + if Some(candidate_para_id) == current_para_id { + let chain = chained_candidates + .last_mut() + .expect("if the current_para_id is Some, then vec is not empty; qed"); + chain.push(candidate); + } else { + current_para_id = Some(candidate_para_id); + chained_candidates.push(vec![candidate]); + } + } + + // Elastic scaling: we prefer chains that have a code upgrade among the candidates, + // as the candidates containing the upgrade tend to be large and hence stand no chance to + // be picked late while maintaining the weight bounds. + // + // Limitations: For simplicity if total weight of a chain of candidates is larger than + // the remaining weight, the chain will still not be included while it could still be possible + // to include part of that chain. + let preferred_chain_indices = chained_candidates .iter() .enumerate() - .filter_map(|(idx, candidate)| { - candidate.candidate().commitments.new_validation_code.as_ref().map(|_code| idx) + .filter_map(|(idx, candidates)| { + // Check if any of the candidate in chain contains a code upgrade. + if candidates + .iter() + .any(|candidate| candidate.candidate().commitments.new_validation_code.is_some()) + { + Some(idx) + } else { + None + } }) .collect::>(); - // There is weight remaining to be consumed by a subset of candidates + // There is weight remaining to be consumed by a subset of chained candidates // which are going to be picked now. 
if let Some(max_consumable_by_candidates) = max_consumable_weight.checked_sub(&total_bitfields_weight) { - let (acc_candidate_weight, indices) = - random_sel::::Hash>, _>( + let (acc_candidate_weight, chained_indices) = + random_sel::::Hash>>, _>( rng, - &candidates, - preferred_indices, - |c| backed_candidate_weight::(c), + &chained_candidates, + preferred_chain_indices, + |candidates| backed_candidates_weight::(&candidates), max_consumable_by_candidates, ); - log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", indices, candidates.len()); - candidates.indexed_retain(|idx, _backed_candidate| indices.binary_search(&idx).is_ok()); + log::debug!(target: LOG_TARGET, "Indices Candidates: {:?}, size: {}", chained_indices, candidates.len()); + chained_candidates + .indexed_retain(|idx, _backed_candidates| chained_indices.binary_search(&idx).is_ok()); // pick all bitfields, and // fill the remaining space with candidates let total_consumed = acc_candidate_weight.saturating_add(total_bitfields_weight); + *candidates = chained_candidates.into_iter().flatten().collect::>(); + return total_consumed } diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index b7285ec884a..fb4d1bd2226 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -22,7 +22,7 @@ use super::*; #[cfg(not(feature = "runtime-benchmarks"))] mod enter { - use super::*; + use super::{inclusion::tests::TestCandidateBuilder, *}; use crate::{ builder::{Bench, BenchBuilder}, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test}, @@ -925,6 +925,129 @@ mod enter { }); } + // Helper fn that builds chained dummy candidates for elastic scaling tests + fn build_backed_candidate_chain( + para_id: ParaId, + len: usize, + start_core_index: usize, + code_upgrade_index: Option, + ) -> Vec { + if let Some(code_upgrade_index) = code_upgrade_index { + assert!(code_upgrade_index < len, "Code upgrade index out of bounds"); + } + + (0..len) + .into_iter() + .map(|idx| { + let mut builder = TestCandidateBuilder::default(); + builder.para_id = para_id; + let mut ccr = builder.build(); + + if Some(idx) == code_upgrade_index { + ccr.commitments.new_validation_code = Some(vec![1, 2, 3, 4].into()); + } + + ccr.commitments.processed_downward_messages = idx as u32; + let core_index = start_core_index + idx; + + BackedCandidate::new( + ccr.into(), + Default::default(), + Default::default(), + Some(CoreIndex(core_index as u32)), + ) + }) + .collect::>() + } + + // Ensure that overweight parachain inherents are always rejected by the runtime. + // Runtime should panic and return `InherentOverweight` error. 
+ #[test] + fn test_backed_candidates_apply_weight_works_for_elastic_scaling() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let seed = [ + 1, 0, 52, 0, 0, 0, 0, 0, 1, 0, 10, 0, 22, 32, 0, 0, 2, 0, 55, 49, 0, 11, 0, 0, 3, + 0, 0, 0, 0, 0, 2, 92, + ]; + let mut rng = rand_chacha::ChaChaRng::from_seed(seed); + + // Create an overweight inherent and oversized block + let mut backed_and_concluding = BTreeMap::new(); + + for i in 0..30 { + backed_and_concluding.insert(i, i); + } + + let scenario = make_inherent_data(TestConfig { + dispute_statements: Default::default(), + dispute_sessions: vec![], // 3 cores with disputes + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + fill_claimqueue: false, + }); + + let mut para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 0 disputes + // => 5*30 = 150) + assert_eq!(para_inherent_data.bitfields.len(), 150); + // * 30 backed candidates + assert_eq!(para_inherent_data.backed_candidates.len(), 30); + + let mut input_candidates = + build_backed_candidate_chain(ParaId::from(1000), 3, 0, Some(1)); + let chained_candidates_weight = backed_candidates_weight::(&input_candidates); + + input_candidates.append(&mut para_inherent_data.backed_candidates); + let input_bitfields = para_inherent_data.bitfields; + + // Test if weight insufficient even for 1 candidate (which doesn't contain a code + // upgrade). + let max_weight = backed_candidate_weight::(&input_candidates[0]) + + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // The chained candidates are not picked, instead a single other candidate is picked + assert_eq!(backed_candidates.len(), 1); + assert_ne!(backed_candidates[0].descriptor().para_id, ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + + // Test if para_id 1000 chained candidates make it if there is enough room for its 3 + // candidates. + let max_weight = + chained_candidates_weight + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // Only the chained candidates should pass filter. + assert_eq!(backed_candidates.len(), 3); + // Check the actual candidates + assert_eq!(backed_candidates[0].descriptor().para_id, ParaId::from(1000)); + assert_eq!(backed_candidates[1].descriptor().para_id, ParaId::from(1000)); + assert_eq!(backed_candidates[2].descriptor().para_id, ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + }); + } + // Ensure that overweight parachain inherents are always rejected by the runtime. // Runtime should panic and return `InherentOverweight` error. 
#[test] -- GitLab From 11831df8e709061e9c6b3292facb5d7d9709f151 Mon Sep 17 00:00:00 2001 From: Dastan <88332432+dastansam@users.noreply.github.com> Date: Thu, 7 Mar 2024 11:36:30 +0100 Subject: [PATCH 56/86] `nomination-pools`: add permissionless condition to `chill` (#3453) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, pool member funds cannot be unbonded if the depositor's stake is less than `MinNominatorBond`. This usually happens after `T::MinNominatorBond` is increased. To fix this, the above mentioned condition is added as a case for permissionless dispatch of `chill`. After pool is chilled, pool members can unbond their funds since pool won't be nominating anymore. Consequently, same check is added to `nominate` call, i.e pool can not start nominating if it's depositor does not have `MinNominatorBond` cc @Ank4n @kianenigma closes #2350 Polkadot address: 16FqwPZ8GRC5U5D4Fu7W33nA55ZXzXGWHwmbnE1eT6pxuqcT --------- Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> Co-authored-by: GonΓ§alo Pestana Co-authored-by: command-bot <> --- prdoc/pr_3453.prdoc | 13 + substrate/frame/nomination-pools/src/lib.rs | 39 ++- substrate/frame/nomination-pools/src/tests.rs | 42 +++ .../frame/nomination-pools/src/weights.rs | 268 ++++++++++-------- .../nomination-pools/test-staking/src/lib.rs | 133 ++++++++- 5 files changed, 367 insertions(+), 128 deletions(-) create mode 100644 prdoc/pr_3453.prdoc diff --git a/prdoc/pr_3453.prdoc b/prdoc/pr_3453.prdoc new file mode 100644 index 00000000000..1f01440f722 --- /dev/null +++ b/prdoc/pr_3453.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-nomination-pools]: `chill` is permissionless if depositor's stake is less than `min_nominator_bond`" + +doc: + - audience: Runtime Dev + description: | + Nomination pools currently have an issue whereby member funds cannot be unbonded if the depositor bonded amount is less than `MinNominatorBond`. + This PR makes the `chill` function permissionless if this condition is met. + Consequently, `nominate` function also checks for `depositor` to have at least `MinNominatorBond`, and returns `MinimumBondNotMet` error if not. +crates: + - name: pallet-nomination-pools diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index c64db9ed692..a0dbdb3aaa4 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -2401,6 +2401,11 @@ pub mod pallet { /// /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. + /// + /// # Note + /// + /// In addition to a `root` or `nominator` role of `origin`, pool's depositor needs to have + /// at least `depositor_min_bond` in the pool to start nominating. #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))] pub fn nominate( @@ -2411,6 +2416,16 @@ pub mod pallet { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); + + let depositor_points = PoolMembers::::get(&bonded_pool.roles.depositor) + .ok_or(Error::::PoolMemberNotFound)? 
+ .active_points(); + + ensure!( + bonded_pool.points_to_balance(depositor_points) >= Self::depositor_min_bond(), + Error::::MinimumBondNotMet + ); + T::Staking::nominate(&bonded_pool.bonded_account(), validators) } @@ -2573,17 +2588,37 @@ pub mod pallet { /// Chill on behalf of the pool. /// - /// The dispatch origin of this call must be signed by the pool nominator or the pool + /// The dispatch origin of this call can be signed by the pool nominator or the pool /// root role, same as [`Pallet::nominate`]. /// + /// Under certain conditions, this call can be dispatched permissionlessly (i.e. by any + /// account). + /// + /// # Conditions for a permissionless dispatch: + /// * When pool depositor has less than `MinNominatorBond` staked, otherwise pool members + /// are unable to unbond. + /// + /// # Conditions for permissioned dispatch: + /// * The caller has a nominator or root role of the pool. /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::chill())] pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; + let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; - ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); + + let depositor_points = PoolMembers::::get(&bonded_pool.roles.depositor) + .ok_or(Error::::PoolMemberNotFound)? + .active_points(); + + if bonded_pool.points_to_balance(depositor_points) >= + T::Staking::minimum_nominator_bond() + { + ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); + } + T::Staking::chill(&bonded_pool.bonded_account()) } diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 13298fa64f5..8fb2b41b88a 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -4848,6 +4848,18 @@ mod nominate { Error::::NotNominator ); + // if `depositor` stake is less than the `MinimumNominatorBond`, they can't nominate + StakingMinBond::set(20); + + // Can't nominate if depositor's stake is less than the `MinimumNominatorBond` + assert_noop!( + Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21]), + Error::::MinimumBondNotMet + ); + + // restore `MinimumNominatorBond` + StakingMinBond::set(10); + // Root can nominate assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21])); assert_eq!(Nominations::get().unwrap(), vec![21]); @@ -7338,3 +7350,33 @@ mod slash { }); } } + +mod chill { + use super::*; + + #[test] + fn chill_works() { + ExtBuilder::default().build_and_execute(|| { + // only nominator or root can chill + assert_noop!( + Pools::chill(RuntimeOrigin::signed(10), 1), + Error::::NotNominator + ); + + // root can chill and re-nominate + assert_ok!(Pools::chill(RuntimeOrigin::signed(900), 1)); + assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![31])); + + // nominator can chill and re-nominate + assert_ok!(Pools::chill(RuntimeOrigin::signed(901), 1)); + assert_ok!(Pools::nominate(RuntimeOrigin::signed(901), 1, vec![31])); + + // if `depositor` stake is less than the `MinimumNominatorBond`, then this call + // becomes permissionless; + StakingMinBond::set(20); + + // any account can chill + assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1)); + }) + } +} diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 2749af7937f..987ddd71703 100644 --- 
a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_nomination_pools -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/nomination-pools/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_nomination_pools +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/nomination-pools/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -114,8 +112,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 181_861_000 picoseconds. - Weight::from_parts(186_375_000, 8877) + // Minimum execution time: 182_643_000 picoseconds. + Weight::from_parts(186_106_000, 8877) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -147,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 182_273_000 picoseconds. - Weight::from_parts(186_635_000, 8877) + // Minimum execution time: 181_464_000 picoseconds. + Weight::from_parts(184_672_000, 8877) .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -182,8 +180,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 217_878_000 picoseconds. - Weight::from_parts(221_493_000, 8877) + // Minimum execution time: 216_182_000 picoseconds. + Weight::from_parts(218_448_000, 8877) .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -203,8 +201,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 74_509_000 picoseconds. - Weight::from_parts(76_683_000, 3719) + // Minimum execution time: 73_830_000 picoseconds. + Weight::from_parts(75_271_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -244,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 166_424_000 picoseconds. - Weight::from_parts(169_698_000, 27847) + // Minimum execution time: 166_099_000 picoseconds. 
+ Weight::from_parts(170_355_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } @@ -270,10 +268,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 66_110_000 picoseconds. - Weight::from_parts(68_620_141, 4764) - // Standard Error: 1_379 - .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + // Minimum execution time: 64_787_000 picoseconds. + Weight::from_parts(67_920_914, 4764) + // Standard Error: 1_482 + .saturating_add(Weight::from_parts(43_264, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -308,10 +306,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 125_669_000 picoseconds. - Weight::from_parts(130_907_641, 27847) - // Standard Error: 2_490 - .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + // Minimum execution time: 124_990_000 picoseconds. + Weight::from_parts(129_041_398, 27847) + // Standard Error: 2_335 + .saturating_add(Weight::from_parts(67_889, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } @@ -362,12 +360,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 225_700_000 picoseconds. - Weight::from_parts(234_390_990, 27847) + // Minimum execution time: 221_955_000 picoseconds. + Weight::from_parts(230_244_437, 27847) + // Standard Error: 4_059 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20_u64)) } @@ -419,19 +419,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 167_171_000 picoseconds. - Weight::from_parts(170_531_000, 8538) + // Minimum execution time: 165_358_000 picoseconds. 
+ Weight::from_parts(169_683_000, 8538) .saturating_add(T::DbWeight::get().reads(23_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -451,13 +457,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1976` // Estimated: `4556 + n * (2520 Β±0)` - // Minimum execution time: 63_785_000 picoseconds. - Weight::from_parts(64_592_302, 4556) - // Standard Error: 9_416 - .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Minimum execution time: 74_483_000 picoseconds. + Weight::from_parts(76_611_288, 4556) + // Standard Error: 9_013 + .saturating_add(Weight::from_parts(1_475_128, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -472,8 +478,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_096_000 picoseconds. - Weight::from_parts(33_533_000, 4556) + // Minimum execution time: 32_598_000 picoseconds. + Weight::from_parts(33_350_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -488,10 +494,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_914_000 picoseconds. - Weight::from_parts(14_800_402, 3735) - // Standard Error: 146 - .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) + // Minimum execution time: 13_705_000 picoseconds. 
+ Weight::from_parts(14_594_211, 3735) + // Standard Error: 141 + .saturating_add(Weight::from_parts(2_119, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -511,8 +517,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_373_000 picoseconds. - Weight::from_parts(4_592_000, 0) + // Minimum execution time: 4_222_000 picoseconds. + Weight::from_parts(4_527_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -521,17 +527,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_662_000 picoseconds. - Weight::from_parts(17_531_000, 3719) + // Minimum execution time: 16_106_000 picoseconds. + Weight::from_parts(17_365_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -546,11 +556,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1971` + // Measured: `2143` // Estimated: `4556` - // Minimum execution time: 61_348_000 picoseconds. - Weight::from_parts(63_712_000, 4556) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Minimum execution time: 71_301_000 picoseconds. + Weight::from_parts(73_626_000, 4556) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -565,8 +575,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 32_232_000 picoseconds. - Weight::from_parts(33_433_000, 3719) + // Minimum execution time: 32_363_000 picoseconds. + Weight::from_parts(33_400_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -578,8 +588,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_774_000 picoseconds. - Weight::from_parts(17_671_000, 3719) + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_294_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -589,8 +599,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_724_000 picoseconds. - Weight::from_parts(17_181_000, 3719) + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_028_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -600,8 +610,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_362_000 picoseconds. - Weight::from_parts(17_135_000, 3719) + // Minimum execution time: 16_350_000 picoseconds. + Weight::from_parts(16_905_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -613,8 +623,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_125_000 picoseconds. - Weight::from_parts(14_705_000, 3702) + // Minimum execution time: 14_081_000 picoseconds. + Weight::from_parts(14_608_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -630,8 +640,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_699_000 picoseconds. - Weight::from_parts(63_605_000, 3719) + // Minimum execution time: 62_178_000 picoseconds. + Weight::from_parts(64_039_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -647,8 +657,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 64_930_000 picoseconds. - Weight::from_parts(66_068_000, 4764) + // Minimum execution time: 64_880_000 picoseconds. + Weight::from_parts(66_541_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -692,8 +702,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 181_861_000 picoseconds. - Weight::from_parts(186_375_000, 8877) + // Minimum execution time: 182_643_000 picoseconds. + Weight::from_parts(186_106_000, 8877) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -725,8 +735,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 182_273_000 picoseconds. - Weight::from_parts(186_635_000, 8877) + // Minimum execution time: 181_464_000 picoseconds. + Weight::from_parts(184_672_000, 8877) .saturating_add(RocksDbWeight::get().reads(17_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -760,8 +770,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 217_878_000 picoseconds. - Weight::from_parts(221_493_000, 8877) + // Minimum execution time: 216_182_000 picoseconds. 
+ Weight::from_parts(218_448_000, 8877) .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } @@ -781,8 +791,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 74_509_000 picoseconds. - Weight::from_parts(76_683_000, 3719) + // Minimum execution time: 73_830_000 picoseconds. + Weight::from_parts(75_271_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -822,8 +832,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 166_424_000 picoseconds. - Weight::from_parts(169_698_000, 27847) + // Minimum execution time: 166_099_000 picoseconds. + Weight::from_parts(170_355_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } @@ -848,10 +858,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1848` // Estimated: `4764` - // Minimum execution time: 66_110_000 picoseconds. - Weight::from_parts(68_620_141, 4764) - // Standard Error: 1_379 - .saturating_add(Weight::from_parts(54_961, 0).saturating_mul(s.into())) + // Minimum execution time: 64_787_000 picoseconds. + Weight::from_parts(67_920_914, 4764) + // Standard Error: 1_482 + .saturating_add(Weight::from_parts(43_264, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -886,10 +896,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2238` // Estimated: `27847` - // Minimum execution time: 125_669_000 picoseconds. - Weight::from_parts(130_907_641, 27847) - // Standard Error: 2_490 - .saturating_add(Weight::from_parts(75_219, 0).saturating_mul(s.into())) + // Minimum execution time: 124_990_000 picoseconds. + Weight::from_parts(129_041_398, 27847) + // Standard Error: 2_335 + .saturating_add(Weight::from_parts(67_889, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } @@ -940,12 +950,14 @@ impl WeightInfo for () { /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 225_700_000 picoseconds. - Weight::from_parts(234_390_990, 27847) + // Minimum execution time: 221_955_000 picoseconds. + Weight::from_parts(230_244_437, 27847) + // Standard Error: 4_059 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(24_u64)) .saturating_add(RocksDbWeight::get().writes(20_u64)) } @@ -997,19 +1009,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 167_171_000 picoseconds. - Weight::from_parts(170_531_000, 8538) + // Minimum execution time: 165_358_000 picoseconds. 
+ Weight::from_parts(169_683_000, 8538) .saturating_add(RocksDbWeight::get().reads(23_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -1029,13 +1047,13 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1976` // Estimated: `4556 + n * (2520 Β±0)` - // Minimum execution time: 63_785_000 picoseconds. - Weight::from_parts(64_592_302, 4556) - // Standard Error: 9_416 - .saturating_add(Weight::from_parts(1_524_398, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 74_483_000 picoseconds. + Weight::from_parts(76_611_288, 4556) + // Standard Error: 9_013 + .saturating_add(Weight::from_parts(1_475_128, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -1050,8 +1068,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 32_096_000 picoseconds. - Weight::from_parts(33_533_000, 4556) + // Minimum execution time: 32_598_000 picoseconds. + Weight::from_parts(33_350_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1066,10 +1084,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 13_914_000 picoseconds. - Weight::from_parts(14_800_402, 3735) - // Standard Error: 146 - .saturating_add(Weight::from_parts(940, 0).saturating_mul(n.into())) + // Minimum execution time: 13_705_000 picoseconds. 
+ Weight::from_parts(14_594_211, 3735) + // Standard Error: 141 + .saturating_add(Weight::from_parts(2_119, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1089,8 +1107,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_373_000 picoseconds. - Weight::from_parts(4_592_000, 0) + // Minimum execution time: 4_222_000 picoseconds. + Weight::from_parts(4_527_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1099,17 +1117,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_662_000 picoseconds. - Weight::from_parts(17_531_000, 3719) + // Minimum execution time: 16_106_000 picoseconds. + Weight::from_parts(17_365_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1124,11 +1146,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1971` + // Measured: `2143` // Estimated: `4556` - // Minimum execution time: 61_348_000 picoseconds. - Weight::from_parts(63_712_000, 4556) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Minimum execution time: 71_301_000 picoseconds. + Weight::from_parts(73_626_000, 4556) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1143,8 +1165,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 32_232_000 picoseconds. - Weight::from_parts(33_433_000, 3719) + // Minimum execution time: 32_363_000 picoseconds. + Weight::from_parts(33_400_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1156,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 16_774_000 picoseconds. - Weight::from_parts(17_671_000, 3719) + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_294_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1167,8 +1189,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_724_000 picoseconds. - Weight::from_parts(17_181_000, 3719) + // Minimum execution time: 16_417_000 picoseconds. + Weight::from_parts(17_028_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1178,8 +1200,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_362_000 picoseconds. - Weight::from_parts(17_135_000, 3719) + // Minimum execution time: 16_350_000 picoseconds. + Weight::from_parts(16_905_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1191,8 +1213,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_125_000 picoseconds. - Weight::from_parts(14_705_000, 3702) + // Minimum execution time: 14_081_000 picoseconds. + Weight::from_parts(14_608_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1208,8 +1230,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 61_699_000 picoseconds. - Weight::from_parts(63_605_000, 3719) + // Minimum execution time: 62_178_000 picoseconds. + Weight::from_parts(64_039_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1225,8 +1247,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 64_930_000 picoseconds. - Weight::from_parts(66_068_000, 4764) + // Minimum execution time: 64_880_000 picoseconds. + Weight::from_parts(66_541_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/nomination-pools/test-staking/src/lib.rs b/substrate/frame/nomination-pools/test-staking/src/lib.rs index 865b7a71e68..98bff537c90 100644 --- a/substrate/frame/nomination-pools/test-staking/src/lib.rs +++ b/substrate/frame/nomination-pools/test-staking/src/lib.rs @@ -22,10 +22,12 @@ mod mock; use frame_support::{assert_noop, assert_ok, traits::Currency}; use mock::*; use pallet_nomination_pools::{ - BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, - PoolState, + BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, + PoolMembers, PoolState, +}; +use pallet_staking::{ + CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination, }; -use pallet_staking::{CurrentEra, Event as StakingEvent, Payee, RewardDestination}; use sp_runtime::{bounded_btree_map, traits::Zero}; #[test] @@ -191,6 +193,131 @@ fn pool_lifecycle_e2e() { }) } +#[test] +fn pool_chill_e2e() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::minimum_balance(), 5); + assert_eq!(Staking::current_era(), None); + + // create the pool, we know this has id 1. + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + // have the pool nominate. 
+ assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: 10, pool_id: 1 }, + PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true }, + ] + ); + + // have two members join + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true }, + PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true }, + ] + ); + + // in case depositor does not have more than `MinNominatorBond` staked, we can end up in + // situation where a member unbonding would cause pool balance to drop below + // `MinNominatorBond` and hence not allowed. This can happen if the `MinNominatorBond` is + // increased after the pool is created. + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + pallet_staking::ConfigOp::Set(55), // minimum nominator bond + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + )); + + // members can unbond as long as total stake of the pool is above min nominator bond + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10),); + assert_eq!(PoolMembers::::get(20).unwrap().unbonding_eras.len(), 1); + assert_eq!(PoolMembers::::get(20).unwrap().points, 0); + + // this member cannot unbond since it will cause `pool stake < MinNominatorBond` + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(21), 21, 10), + StakingError::::InsufficientBond, + ); + + // members can call `chill` permissionlessly now + assert_ok!(Pools::chill(RuntimeOrigin::signed(20), 1)); + + // now another member can unbond. + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); + assert_eq!(PoolMembers::::get(21).unwrap().unbonding_eras.len(), 1); + assert_eq!(PoolMembers::::get(21).unwrap().points, 0); + + // nominator can not resume nomination until depositor have enough stake + assert_noop!( + Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]), + PoolsError::::MinimumBondNotMet, + ); + + // other members joining pool does not affect the depositor's ability to resume nomination + assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1)); + + assert_noop!( + Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]), + PoolsError::::MinimumBondNotMet, + ); + + // depositor can bond extra stake + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); + + // `chill` can not be called permissionlessly anymore + assert_noop!( + Pools::chill(RuntimeOrigin::signed(20), 1), + PoolsError::::NotNominator, + ); + + // now nominator can resume nomination + assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + + // skip to make the unbonding period end. + CurrentEra::::set(Some(BondingDuration::get())); + + // members can now withdraw. 
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Chilled { stash: POOL1_BONDED }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // other member bonding + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // depositor bond extra + StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 }, + ] + ); + }) +} + #[test] fn pool_slash_e2e() { new_test_ext().execute_with(|| { -- GitLab From 30c32e3d8456984e19a7edf9e851b0db6135f376 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Thu, 7 Mar 2024 10:45:56 +0000 Subject: [PATCH 57/86] move substrate-bip39 into polkadot-sdk (#3579) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moves [substrate-bip39](https://github.com/paritytech/substrate-bip39) into substrate. All git history is preserved. Dependencies have been updated to use the same version as the rest of the repo. Fixes https://github.com/paritytech/polkadot-sdk/issues/1934. --------- Co-authored-by: Maciej Hirsz Co-authored-by: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com> Co-authored-by: Gav Wood Co-authored-by: Stanislav Tkach Co-authored-by: Robert Habermeier Co-authored-by: Pierre Krieger Co-authored-by: Demi M. Obenour Co-authored-by: Bastian KΓΆcher Co-authored-by: NikVolf Co-authored-by: Bastian KΓΆcher Co-authored-by: Benjamin Kampmann Co-authored-by: Maciej Hirsz Co-authored-by: cheme Co-authored-by: adoerr <0xad@gmx.net> Co-authored-by: Jun Jiang Co-authored-by: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- Cargo.lock | 172 +++---------- Cargo.toml | 1 + polkadot/node/core/approval-voting/Cargo.toml | 2 +- polkadot/node/subsystem-bench/Cargo.toml | 5 +- substrate/primitives/core/Cargo.toml | 4 +- substrate/utils/substrate-bip39/Cargo.toml | 31 +++ substrate/utils/substrate-bip39/README.md | 55 +++++ substrate/utils/substrate-bip39/src/lib.rs | 232 ++++++++++++++++++ 8 files changed, 359 insertions(+), 143 deletions(-) create mode 100644 substrate/utils/substrate-bip39/Cargo.toml create mode 100644 substrate/utils/substrate-bip39/README.md create mode 100644 substrate/utils/substrate-bip39/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 177d76b8706..4815e976b5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -744,12 +744,6 @@ dependencies = [ "nodrop", ] -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.4" @@ -1334,7 +1328,7 @@ dependencies = [ "ark-std 0.4.0", "dleq_vrf", "fflonk", - "merlin 3.0.0", + "merlin", "rand_chacha 0.3.1", "rand_core 0.6.4", "ring 0.1.0", @@ -1559,18 +1553,6 @@ dependencies = [ "constant_time_eq 0.3.0", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - [[package]] name = 
"block-buffer" version = "0.9.0" @@ -1589,15 +1571,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - [[package]] name = "blocking" version = "1.3.1" @@ -2634,9 +2607,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" +checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" dependencies = [ "num-traits", ] @@ -2854,11 +2827,10 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "is-terminal", "lazy_static", "windows-sys 0.48.0", ] @@ -2886,7 +2858,7 @@ dependencies = [ "ark-std 0.4.0", "fflonk", "getrandom_or_panic", - "merlin 3.0.0", + "merlin", "rand_chacha 0.3.1", ] @@ -3526,16 +3498,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "crypto-mac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.5.0", -] - [[package]] name = "ctr" version = "0.7.0" @@ -4396,19 +4358,6 @@ dependencies = [ "url", ] -[[package]] -name = "curve25519-dalek" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -5238,12 +5187,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -5353,7 +5296,7 @@ dependencies = [ "ark-poly", "ark-serialize 0.4.2", "ark-std 0.4.0", - "merlin 3.0.0", + "merlin", ] [[package]] @@ -6428,16 +6371,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.0", - "digest 0.9.0", -] - [[package]] name = "hmac" version = "0.12.1" @@ -8124,18 +8057,6 @@ dependencies = [ "hash-db", ] -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - [[package]] name = "merlin" version = "3.0.0" @@ -11642,19 +11563,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" [[package]] -name = "paste" -version = "1.0.14" +name = 
"password-hash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle 2.5.0", +] [[package]] -name = "pbkdf2" -version = "0.8.0" +name = "paste" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" -dependencies = [ - "crypto-mac 0.11.0", -] +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -11663,6 +11586,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", + "password-hash", ] [[package]] @@ -12454,7 +12378,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "log", - "merlin 3.0.0", + "merlin", "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-jaeger", @@ -13672,7 +13596,7 @@ dependencies = [ "sc-keystore", "sc-network", "sc-service", - "schnorrkel 0.9.1", + "schnorrkel 0.11.4", "serde", "serde_yaml", "sha1", @@ -14921,7 +14845,7 @@ dependencies = [ "blake2 0.10.6", "common", "fflonk", - "merlin 3.0.0", + "merlin", ] [[package]] @@ -17103,22 +17027,6 @@ dependencies = [ "hashbrown 0.13.2", ] -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.3", - "merlin 2.0.1", - "rand_core 0.5.1", - "sha2 0.8.2", - "subtle 2.5.0", - "zeroize", -] - [[package]] name = "schnorrkel" version = "0.10.2" @@ -17128,7 +17036,7 @@ dependencies = [ "arrayref", "arrayvec 0.7.4", "curve25519-dalek-ng", - "merlin 3.0.0", + "merlin", "rand_core 0.6.4", "sha2 0.9.9", "subtle-ng", @@ -17146,7 +17054,7 @@ dependencies = [ "arrayvec 0.7.4", "curve25519-dalek 4.1.2", "getrandom_or_panic", - "merlin 3.0.0", + "merlin", "rand_core 0.6.4", "serde_bytes", "sha2 0.10.7", @@ -17510,18 +17418,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.9" @@ -17756,13 +17652,13 @@ dependencies = [ "hmac 0.12.1", "itertools 0.11.0", "libsecp256k1", - "merlin 3.0.0", + "merlin", "no-std-net", "nom", "num-bigint", "num-rational", "num-traits", - "pbkdf2 0.12.2", + "pbkdf2", "pin-project", "poly1305 0.8.0", "rand", @@ -18673,7 +18569,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin 3.0.0", + "merlin", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -19829,14 +19725,14 @@ dependencies = [ [[package]] name = "substrate-bip39" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e620c7098893ba667438b47169c00aacdd9e7c10e042250ce2b60b087ec97328" +version = "0.4.7" dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", - "schnorrkel 0.9.1", - "sha2 0.9.9", + "bip39", + "hmac 0.12.1", + "pbkdf2", + "rustc-hex", + "schnorrkel 0.11.4", + "sha2 0.10.7", "zeroize", ] diff --git a/Cargo.toml 
b/Cargo.toml index a2e22e0b927..80068596685 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -496,6 +496,7 @@ members = [ "substrate/utils/frame/rpc/system", "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", + "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", "templates/minimal/node", diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index f6d89dbc152..2a5b6198b9a 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -35,7 +35,7 @@ sp-consensus = { path = "../../../../substrate/primitives/consensus/common", def sp-consensus-slots = { path = "../../../../substrate/primitives/consensus/slots", default-features = false } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto", default-features = false, features = ["full_crypto"] } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -# should match schnorrkel +# rand_core should match schnorrkel rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } rand = "0.8.5" diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 726e7de4587..7c60ff48269 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -78,8 +78,9 @@ sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-timestamp = { path = "../../../substrate/primitives/timestamp" } -schnorrkel = { version = "0.9.1", default-features = false } -rand_core = "0.6.2" # should match schnorrkel +schnorrkel = { version = "0.11.4", default-features = false } +# rand_core should match schnorrkel +rand_core = "0.6.2" rand_chacha = { version = "0.3.1" } paste = "1.0.14" orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 8fcabfeb238..70ca7be6a2d 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -27,7 +27,7 @@ hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } -substrate-bip39 = { version = "0.4.5", optional = true } +substrate-bip39 = { path = "../../utils/substrate-bip39", optional = true } bip39 = { version = "2.0.0", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } @@ -116,7 +116,7 @@ std = [ "sp-std/std", "sp-storage/std", "ss58-registry/std", - "substrate-bip39", + "substrate-bip39/std", "thiserror", "tracing", "w3f-bls?/std", diff --git a/substrate/utils/substrate-bip39/Cargo.toml b/substrate/utils/substrate-bip39/Cargo.toml new file mode 100644 index 00000000000..a46f81ee24d --- /dev/null +++ b/substrate/utils/substrate-bip39/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "substrate-bip39" +version = "0.4.7" +license = "Apache-2.0" +description = "Converting BIP39 entropy to valid Substrate (sr25519) SecretKeys" +documentation = "https://docs.rs/substrate-bip39" +authors.workspace = true +edition.workspace = true +repository.workspace = true + +[dependencies] +hmac = "0.12.1" +pbkdf2 = 
{ version = "0.12.2", default-features = false } +schnorrkel = { version = "0.11.4", default-features = false } +sha2 = { version = "0.10.7", default-features = false } +zeroize = { version = "1.4.3", default-features = false } + +[dev-dependencies] +bip39 = "2.0.0" +rustc-hex = "2.1.0" + +[features] +default = ["std"] +std = [ + "hmac/std", + "pbkdf2/std", + "schnorrkel/std", + "sha2/std", + "zeroize/alloc", + "zeroize/std", +] diff --git a/substrate/utils/substrate-bip39/README.md b/substrate/utils/substrate-bip39/README.md new file mode 100644 index 00000000000..368d18f406b --- /dev/null +++ b/substrate/utils/substrate-bip39/README.md @@ -0,0 +1,55 @@ +# Substrate BIP39 + +This is a crate for deriving secret keys for Ristretto compressed Ed25519 (should be compatible with Ed25519 at this +time) from BIP39 phrases. + +## Why? + +The natural approach here would be to use the 64-byte seed generated from the BIP39 phrase, and use that to construct +the key. This approach, while reasonable and fairly straightforward to implement, also means we would have to inherit +all the characteristics of seed generation. Since we are breaking compatibility with both BIP32 and BIP44 anyway (which +we are free to do as we are no longer using the Secp256k1 curve), there is also no reason why we should adhere to BIP39 +seed generation from the mnemonic. + +BIP39 seed generation was designed to be compatible with user supplied brain wallet phrases as well as being extensible +to wallets providing their own dictionaries and checksum mechanism. Issues with those two points: + +1. Brain wallets are a horrible idea, simply because humans are bad entropy generators. It's next to impossible to + educate users on how to use that feature in a secure manner. The 2048 rounds of PBKDF2 is a mere inconvenience that + offers no real protection against dictionary attacks for anyone equipped with modern consumer hardware. Brain wallets + have given users a false sense of security. _People have lost money_ this way and wallet providers today tend to stick + to CSPRNG supplied dictionary phrases. + +2. Providing own dictionaries fell into the _you ain't gonna need it_ anti-pattern category on day 1. Wallet providers + (be it hardware or software) typically want their products to be compatible with other wallets so that users can + migrate to their product without having to migrate all their assets. + +To achieve the above, phrases have to be precisely encoded in _The One True Canonical Encoding_, for which UTF-8 NFKD was +chosen. This is largely irrelevant (and even ignored) for English phrases, as they encode to basically just ASCII in +virtually every character encoding known to mankind, but immediately becomes a problem for dictionaries that do use +non-ASCII characters. Even if the right encoding is used and implemented correctly, there are still [other caveats +present for some non-English dictionaries](https://github.com/bitcoin/bips/blob/master/bip-0039/bip-0039-wordlists.md), +such as normalizing spaces to a canonical form, or making some Latin-based characters equivalent to their base in +dictionary lookups (e.g. Spanish `ñ` and `n` are meant to be interchangeable). Thinking about all of this gives me a +headache, and opens doors for disagreements between buggy implementations, breaking compatibility. + +BIP39 already provides a form of the mnemonic that is free from all of these issues: the entropy byte array.
Since +verifying the checksum requires that we recover the entropy from which the phrase was generated, no extra work is +actually needed here. Wallet implementors can encode the dictionaries in whatever encoding they find convenient (as +long as they are the standard BIP39 dictionaries), no harm in using UTF-16 string primitives that Java and JavaScript +provide. Since the dictionary is fixed and known, and the checksum is done on the entropy itself, the exact character +encoding used becomes irrelevant, as are the precise codepoints and amount of whitespace around the words. It is thus +much harder to create a buggy implementation. + +PBKDF2 was kept in place, along with the password. Using 24 words (with their 256 bits of entropy) makes the extra hashing +redundant (if you could brute force 256 bit entropy, you can also just brute force secret keys), however, some users +might still be using 12-word phrases from other applications. There is no good reason that I can see to prohibit users from recovering +their old wallets using 12 words, in which case the extra hashing does provide _some_ protection. +Passwords are also a feature that some power users find useful - particularly for creating a decoy address with a small +balance with an empty password, while the funds proper are stored on an address that requires a password to be entered. + +## Why not ditch BIP39 altogether? + +Because there are hardware wallets that use a single phrase for the entire device, and operate multiple accounts on +multiple networks using that. A completely different wordlist would make their life much harder when it comes to +providing future Substrate support. diff --git a/substrate/utils/substrate-bip39/src/lib.rs b/substrate/utils/substrate-bip39/src/lib.rs new file mode 100644 index 00000000000..3673d20faed --- /dev/null +++ b/substrate/utils/substrate-bip39/src/lib.rs @@ -0,0 +1,232 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::string::String; + +use hmac::Hmac; +use pbkdf2::pbkdf2; +use schnorrkel::keys::MiniSecretKey; +use sha2::Sha512; +use zeroize::Zeroize; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Error { + InvalidEntropy, +} + +/// `entropy` should be a byte array from a correctly recovered and checksummed BIP39. +/// +/// This function accepts slices of different lengths for different word counts: +/// +/// + 16 bytes for 12 words. +/// + 20 bytes for 15 words. +/// + 24 bytes for 18 words. +/// + 28 bytes for 21 words. +/// + 32 bytes for 24 words. +/// +/// Any other length will return an error. +/// +/// `password` is analogous to BIP39 seed generation itself, with an empty string being the default.
+pub fn mini_secret_from_entropy(entropy: &[u8], password: &str) -> Result { + let seed = seed_from_entropy(entropy, password)?; + Ok(MiniSecretKey::from_bytes(&seed[..32]).expect("Length is always correct; qed")) +} + +/// Similar to `mini_secret_from_entropy`, except that it provides the 64-byte seed directly. +pub fn seed_from_entropy(entropy: &[u8], password: &str) -> Result<[u8; 64], Error> { + if entropy.len() < 16 || entropy.len() > 32 || entropy.len() % 4 != 0 { + return Err(Error::InvalidEntropy); + } + + let mut salt = String::with_capacity(8 + password.len()); + salt.push_str("mnemonic"); + salt.push_str(password); + + let mut seed = [0u8; 64]; + + pbkdf2::>(entropy, salt.as_bytes(), 2048, &mut seed) + .map_err(|_| Error::InvalidEntropy)?; + + salt.zeroize(); + + Ok(seed) +} + +#[cfg(test)] +mod test { + use super::*; + + #[cfg(not(feature = "std"))] + use alloc::vec::Vec; + + use bip39::{Language, Mnemonic}; + use rustc_hex::FromHex; + + // phrase, entropy, seed, expanded secret_key + // + // ALL SEEDS GENERATED USING "Substrate" PASSWORD! + static VECTORS: &[[&str; 3]] = &[ + [ + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about", + "00000000000000000000000000000000", + "44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453", + ], + [ + "legal winner thank year wave sausage worth useful legal winner thank yellow", + "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", + "4313249608fe8ac10fd5886c92c4579007272cb77c21551ee5b8d60b780416850f1e26c1f4b8d88ece681cb058ab66d6182bc2ce5a03181f7b74c27576b5c8bf", + ], + [ + "letter advice cage absurd amount doctor acoustic avoid letter advice cage above", + "80808080808080808080808080808080", + "27f3eb595928c60d5bc91a4d747da40ed236328183046892ed6cd5aa9ae38122acd1183adf09a89839acb1e6eaa7fb563cc958a3f9161248d5a036e0d0af533d", + ], + [ + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong", + "ffffffffffffffffffffffffffffffff", + "227d6256fd4f9ccaf06c45eaa4b2345969640462bbb00c5f51f43cb43418c7a753265f9b1e0c0822c155a9cabc769413ecc14553e135fe140fc50b6722c6b9df", + ], + [ + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent", + "000000000000000000000000000000000000000000000000", + "44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453", + ], + [ + "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will", + "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", + "cb1d50e14101024a88905a098feb1553d4306d072d7460e167a60ccb3439a6817a0afc59060f45d999ddebc05308714733c9e1e84f30feccddd4ad6f95c8a445", + ], + [ + "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always", + "808080808080808080808080808080808080808080808080", + "9ddecf32ce6bee77f867f3c4bb842d1f0151826a145cb4489598fe71ac29e3551b724f01052d1bc3f6d9514d6df6aa6d0291cfdf997a5afdb7b6a614c88ab36a", + ], + [ + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when", + "ffffffffffffffffffffffffffffffffffffffffffffffff", + "8971cb290e7117c64b63379c97ed3b5c6da488841bd9f95cdc2a5651ac89571e2c64d391d46e2475e8b043911885457cd23e99a28b5a18535fe53294dc8e1693", + ], + [ + "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon 
abandon abandon abandon abandon abandon abandon art", + "0000000000000000000000000000000000000000000000000000000000000000", + "44e9d125f037ac1d51f0a7d3649689d422c2af8b1ec8e00d71db4d7bf6d127e33f50c3d5c84fa3e5399c72d6cbbbbc4a49bf76f76d952f479d74655a2ef2d453", + ], + [ + "legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title", + "7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f", + "3037276a5d05fcd7edf51869eb841bdde27c574dae01ac8cfb1ea476f6bea6ef57ab9afe14aea1df8a48f97ae25b37d7c8326e49289efb25af92ba5a25d09ed3", + ], + [ + "letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless", + "8080808080808080808080808080808080808080808080808080808080808080", + "2c9c6144a06ae5a855453d98c3dea470e2a8ffb78179c2e9eb15208ccca7d831c97ddafe844ab933131e6eb895f675ede2f4e39837bb5769d4e2bc11df58ac42", + ], + [ + "zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "047e89ef7739cbfe30da0ad32eb1720d8f62441dd4f139b981b8e2d0bd412ed4eb14b89b5098c49db2301d4e7df4e89c21e53f345138e56a5e7d63fae21c5939", + ], + [ + "ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic", + "9e885d952ad362caeb4efe34a8e91bd2", + "f4956be6960bc145cdab782e649a5056598fd07cd3f32ceb73421c3da27833241324dc2c8b0a4d847eee457e6d4c5429f5e625ece22abaa6a976e82f1ec5531d", + ], + [ + "gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog", + "6610b25967cdcca9d59875f5cb50b0ea75433311869e930b", + "fbcc5229ade0c0ff018cb7a329c5459f91876e4dde2a97ddf03c832eab7f26124366a543f1485479c31a9db0d421bda82d7e1fe562e57f3533cb1733b001d84d", + ], + [ + "hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length", + "68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c", + "7c60c555126c297deddddd59f8cdcdc9e3608944455824dd604897984b5cc369cad749803bb36eb8b786b570c9cdc8db275dbe841486676a6adf389f3be3f076", + ], + [ + "scheme spot photo card baby mountain device kick cradle pact join borrow", + "c0ba5a8e914111210f2bd131f3d5e08d", + "c12157bf2506526c4bd1b79a056453b071361538e9e2c19c28ba2cfa39b5f23034b974e0164a1e8acd30f5b4c4de7d424fdb52c0116bfc6a965ba8205e6cc121", + ], + [ + "horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave", + "6d9be1ee6ebd27a258115aad99b7317b9c8d28b6d76431c3", + "23766723e970e6b79dec4d5e4fdd627fd27d1ee026eb898feb9f653af01ad22080c6f306d1061656d01c4fe9a14c05f991d2c7d8af8730780de4f94cd99bd819", + ], + [ + "panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside", + "9f6a2878b2520799a44ef18bc7df394e7061a224d2c33cd015b157d746869863", + "f4c83c86617cb014d35cd87d38b5ef1c5d5c3d58a73ab779114438a7b358f457e0462c92bddab5a406fe0e6b97c71905cf19f925f356bc673ceb0e49792f4340", + ], + [ + "cat swing flag economy stadium alone churn speed unique patch report train", + "23db8160a31d3e0dca3688ed941adbf3", + "719d4d4de0638a1705bf5237262458983da76933e718b2d64eb592c470f3c5d222e345cc795337bb3da393b94375ff4a56cfcd68d5ea25b577ee9384d35f4246", + ], + [ + "light rule cinnamon wrap drastic word pride squirrel 
upgrade then income fatal apart sustain crack supply proud access", + "8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0", + "7ae1291db32d16457c248567f2b101e62c5549d2a64cd2b7605d503ec876d58707a8d663641e99663bc4f6cc9746f4852e75e7e54de5bc1bd3c299c9a113409e", + ], + [ + "all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform", + "066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad", + "a911a5f4db0940b17ecb79c4dcf9392bf47dd18acaebdd4ef48799909ebb49672947cc15f4ef7e8ef47103a1a91a6732b821bda2c667e5b1d491c54788c69391", + ], + [ + "vessel ladder alter error federal sibling chat ability sun glass valve picture", + "f30f8c1da665478f49b001d94c5fc452", + "4e2314ca7d9eebac6fe5a05a5a8d3546bc891785414d82207ac987926380411e559c885190d641ff7e686ace8c57db6f6e4333c1081e3d88d7141a74cf339c8f", + ], + [ + "scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump", + "c10ec20dc3cd9f652c7fac2f1230f7a3c828389a14392f05", + "7a83851102849edc5d2a3ca9d8044d0d4f00e5c4a292753ed3952e40808593251b0af1dd3c9ed9932d46e8608eb0b928216a6160bd4fc775a6e6fbd493d7c6b2", + ], + [ + "void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold", + "f585c11aec520db57dd353c69554b21a89b20fb0650966fa0a9d6f74fd989d8f", + "938ba18c3f521f19bd4a399c8425b02c716844325b1a65106b9d1593fbafe5e0b85448f523f91c48e331995ff24ae406757cff47d11f240847352b348ff436ed", + ] + ]; + + #[test] + fn vectors_are_correct() { + for vector in VECTORS { + let phrase = vector[0]; + + let expected_entropy: Vec = vector[1].from_hex().unwrap(); + let expected_seed: Vec = vector[2].from_hex().unwrap(); + + let mnemonic = Mnemonic::parse_in(Language::English, phrase).unwrap(); + let seed = seed_from_entropy(&mnemonic.to_entropy(), "Substrate").unwrap(); + let secret = mini_secret_from_entropy(&mnemonic.to_entropy(), "Substrate") + .unwrap() + .to_bytes(); + + assert_eq!( + mnemonic.to_entropy(), + &expected_entropy[..], + "Entropy is incorrect for {}", + phrase + ); + assert_eq!(&seed[..], &expected_seed[..], "Seed is incorrect for {}", phrase); + assert_eq!(&secret[..], &expected_seed[..32], "Secret is incorrect for {}", phrase); + } + } +} -- GitLab From 50cc1c2f7efa0e61ac773b8e89171c9640315475 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 7 Mar 2024 11:40:30 +0000 Subject: [PATCH 58/86] Add documentation around pallet coupling (#3542) substrate.io deprecation companion: https://github.com/substrate-developer-hub/substrate-docs/pull/2139 pba-content companion: https://github.com/Polkadot-Blockchain-Academy/pba-content/pull/978 partially inspired by: https://github.com/paritytech/polkadot-sdk/issues/3535 --------- Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- Cargo.lock | 1 + docs/sdk/Cargo.toml | 1 + docs/sdk/src/polkadot_sdk/frame_runtime.rs | 144 +++++---- .../reference_docs/frame_pallet_coupling.rs | 296 ++++++++++++++++++ docs/sdk/src/reference_docs/mod.rs | 4 + substrate/frame/support/src/lib.rs | 5 +- 6 files changed, 374 insertions(+), 77 deletions(-) create mode 100644 docs/sdk/src/reference_docs/frame_pallet_coupling.rs diff --git a/Cargo.lock b/Cargo.lock index 4815e976b5d..e38f09db4d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13332,6 +13332,7 @@ dependencies = [ "frame-support", "frame-system", "kitchensink-runtime", + 
"pallet-assets", "pallet-aura", "pallet-authorship", "pallet-balances", diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 735b3d7df61..8b498d407c0 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -65,6 +65,7 @@ pallet-aura = { path = "../../substrate/frame/aura", default-features = false } # Pallets and FRAME internals pallet-timestamp = { path = "../../substrate/frame/timestamp" } pallet-balances = { path = "../../substrate/frame/balances" } +pallet-assets = { path = "../../substrate/frame/assets" } pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } pallet-utility = { path = "../../substrate/frame/utility" } pallet-multisig = { path = "../../substrate/frame/multisig" } diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs index c9eba7d64bd..f178fc82bf4 100644 --- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -87,93 +87,89 @@ //! * writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template). //! * writing a runtime in AssemblyScript,as explored in [this project](https://github.com/LimeChain/subsembly). -#[cfg(test)] -mod tests { - use frame::prelude::*; +use frame::prelude::*; - /// A FRAME based pallet. This `mod` is the entry point for everything else. All - /// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an - /// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for - /// more. - #[docify::export] - #[frame::pallet(dev_mode)] - pub mod pallet { - use super::*; +/// A FRAME based pallet. This `mod` is the entry point for everything else. All +/// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an +/// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for +/// more. +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet { + use super::*; - /// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a - /// later point from the runtime that wishes to contain it. It allows the pallet to be - /// parameterized over both types and values. - #[pallet::config] - pub trait Config: frame_system::Config { - /// A type that is not known now, but the runtime that will contain this pallet will - /// know it later, therefore we define it here as an associated type. - type RuntimeEvent: IsType<::RuntimeEvent> - + From>; + /// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a + /// later point from the runtime that wishes to contain it. It allows the pallet to be + /// parameterized over both types and values. + #[pallet::config] + pub trait Config: frame_system::Config { + /// A type that is not known now, but the runtime that will contain this pallet will + /// know it later, therefore we define it here as an associated type. + type RuntimeEvent: IsType<::RuntimeEvent> + From>; - /// A parameterize-able value that we receive later via the `Get<_>` trait. - type ValueParameter: Get; + /// A parameterize-able value that we receive later via the `Get<_>` trait. + type ValueParameter: Get; - /// Similar to [`Config::ValueParameter`], but using `const`. Both are functionally - /// equal, but offer different tradeoffs. - const ANOTHER_VALUE_PARAMETER: u32; - } + /// Similar to [`Config::ValueParameter`], but using `const`. 
Both are functionally + /// equal, but offer different tradeoffs. + const ANOTHER_VALUE_PARAMETER: u32; + } - /// A mandatory struct in each pallet. All functions callable by external users (aka. - /// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For - /// convenience, internal (private) functions can also be attached to this type. - #[pallet::pallet] - pub struct Pallet(PhantomData); + /// A mandatory struct in each pallet. All functions callable by external users (aka. + /// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For + /// convenience, internal (private) functions can also be attached to this type. + #[pallet::pallet] + pub struct Pallet(PhantomData); - /// The events tha this pallet can emit. - #[pallet::event] - pub enum Event {} + /// The events tha this pallet can emit. + #[pallet::event] + pub enum Event {} - /// A storage item that this pallet contains. This will be part of the state root trie/root - /// of the blockchain. - #[pallet::storage] - pub type Value = StorageValue; + /// A storage item that this pallet contains. This will be part of the state root trie/root + /// of the blockchain. + #[pallet::storage] + pub type Value = StorageValue; - /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a - /// `impl` block. - #[pallet::call] - impl Pallet { - /// This will be callable by external users, and has two u32s as a parameter. - pub fn some_dispatchable( - _origin: OriginFor, - _param: u32, - _other_para: u32, - ) -> DispatchResult { - Ok(()) - } + /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a + /// `impl` block. + #[pallet::call] + impl Pallet { + /// This will be callable by external users, and has two u32s as a parameter. + pub fn some_dispatchable( + _origin: OriginFor, + _param: u32, + _other_para: u32, + ) -> DispatchResult { + Ok(()) } } +} - /// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of - /// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* - /// runtime. - #[docify::export] - pub mod runtime { - use super::pallet as pallet_example; - use frame::{prelude::*, testing_prelude::*}; - - // The major macro that amalgamates pallets into `enum Runtime` - construct_runtime!( - pub enum Runtime { - System: frame_system, - Example: pallet_example, - } - ); +/// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of +/// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* +/// runtime. +#[docify::export] +pub mod runtime { + use super::pallet as pallet_example; + use frame::{prelude::*, testing_prelude::*}; - // These `impl` blocks specify the parameters of each pallet's `trait Config`. - #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - impl frame_system::Config for Runtime { - type Block = MockBlock; + // The major macro that amalgamates pallets into `enum Runtime` + construct_runtime!( + pub enum Runtime { + System: frame_system, + Example: pallet_example, } + ); - impl pallet_example::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValueParameter = ConstU32<42>; - const ANOTHER_VALUE_PARAMETER: u32 = 42; - } + // These `impl` blocks specify the parameters of each pallet's `trait Config`. 
+	#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+	impl frame_system::Config for Runtime {
+		type Block = MockBlock<Self>;
+	}
+
+	impl pallet_example::Config for Runtime {
+		type RuntimeEvent = RuntimeEvent;
+		type ValueParameter = ConstU32<42>;
+		const ANOTHER_VALUE_PARAMETER: u32 = 42;
 	}
 }
diff --git a/docs/sdk/src/reference_docs/frame_pallet_coupling.rs b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs
new file mode 100644
index 00000000000..942717ecfb2
--- /dev/null
+++ b/docs/sdk/src/reference_docs/frame_pallet_coupling.rs
@@ -0,0 +1,296 @@
+//! # FRAME Pallet Coupling
+//!
+//! This reference document explains how FRAME pallets can be combined to work together.
+//!
+//! It is suggested to re-read [`crate::polkadot_sdk::frame_runtime`], notably the information
+//! around [`frame::pallet_macros::config`]. Recall that:
+//!
+//! > Configuration trait of a pallet: It allows a pallet to receive types at a later
+//! > point from the runtime that wishes to contain it. It allows the pallet to be parameterized
+//! > over both types and values.
+//!
+//! ## Context, Background
+//!
+//! FRAME pallets, as described in [`crate::polkadot_sdk::frame_runtime`], are:
+//!
+//! > A pallet is a unit of encapsulated logic. It has a clearly defined responsibility and can be
+//! linked to other pallets.
+//!
+//! That is to say:
+//!
+//! * *encapsulated*: Ideally, a FRAME pallet contains encapsulated logic which has clear
+//!   boundaries. It is generally a bad idea to build a single monolithic pallet that does multiple
+//!   things, such as handling currencies, identities and staking all at the same time.
+//! * *linked to other pallets*: But, adhering extensively to the above also hinders the ability to
+//!   write useful applications. Pallets often need to work with each other, communicate and use
+//!   each other's functionalities.
+//!
+//! The broad principle that allows pallets to be linked together is the same mechanism through which a
+//! pallet uses its `Config` trait to receive types and values from the runtime that contains it.
+//!
+//! There are generally two ways to achieve this:
+//!
+//! 1. Tight coupling pallets
+//! 2. Loose coupling pallets
+//!
+//! To explain the difference between the two, consider two pallets, `A` and `B`. In both cases, `A`
+//! wants to use some functionality exposed by `B`.
+//!
+//! When tightly coupling pallets, `A` can only exist in a runtime if `B` is also present in the
+//! same runtime. That is, `A` is expressing that it can only work if `B` is present.
+//!
+//! This translates to the following Rust code:
+//!
+//! ```
+//! trait Pallet_B_Config {}
+//! trait Pallet_A_Config: Pallet_B_Config {}
+//! ```
+//!
+//! By contrast, when pallets are loosely coupled, `A` expresses that some functionality, expressed via
+//! a trait `F`, needs to be fulfilled. This trait is then implemented by `B`, and the two pallets
+//! are linked together at the runtime level. This means that `A` only relies on the implementation
+//! of `F`, which may be `B`, or another implementation of `F`.
+//!
+//! This translates to the following Rust code:
+//!
+//! ```
+//! trait F {}
+//! trait Pallet_A_Config {
+//! 	type F: F;
+//! }
+//! // Pallet_B will implement and fulfill `F`.
+//! ```
+//!
+//! ## Example
+//!
+//! Consider the following example, in which `pallet-foo` needs another pallet to provide the block
+//! author to it, and `pallet-author` which has access to this information.
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_foo)]
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_author)]
+//!
+//! ### Tight Coupling Pallets
+//!
+//! To tightly couple `pallet-foo` and `pallet-author`, we use Rust's supertrait system. When a
+//! pallet makes its own `trait Config` be bounded by another pallet's `trait Config`, it is
+//! expressing two things:
+//!
+//! 1. that it can only exist in a runtime if the other pallet is also present.
+//! 2. that it can use the other pallet's functionality.
+//!
+//! `pallet-foo`'s `Config` would then look like:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", tight_config)]
+//!
+//! And `pallet-foo` can use the method exposed by `pallet_author::Pallet` directly:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", tight_usage)]
+//!
+//!
+//! ### Loosely Coupling Pallets
+//!
+//! If `pallet-foo` wants to *not* rely on `pallet-author` directly, it can leverage its
+//! `Config`'s associated types. First, we need a trait to express the functionality that
+//! `pallet-foo` wants to obtain:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", AuthorProvider)]
+//!
+//! > We sometimes refer to such traits that help two pallets interact as "glue traits".
+//!
+//! Next, `pallet-foo` states that it needs this trait to be provided to it, at the runtime level,
+//! via an associated type:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", loose_config)]
+//!
+//! Then, `pallet-foo` can use this trait to obtain the block author, without knowing where it comes
+//! from:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", loose_usage)]
+//!
+//! Then, if `pallet-author` implements this glue-trait:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", pallet_author_provider)]
+//!
+//! And upon the creation of the runtime, the two pallets are linked together as follows:
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", runtime_author_provider)]
+//!
+//! Crucially, when using loose coupling, we gain the flexibility of providing different
+//! implementations of `AuthorProvider`, such that different users of `pallet-foo` can use
+//! different ones, without any code change being needed. For example, in the code snippets of this
+//! module, you can find [`OtherAuthorProvider`], which is an alternative implementation of
+//! [`AuthorProvider`].
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", other_author_provider)]
+//!
+//! A common pattern in polkadot-sdk is to provide an implementation of such glue traits for the unit
+//! type as a "default/test behavior".
+#![doc = docify::embed!("./src/reference_docs/frame_pallet_coupling.rs", unit_author_provider)]
+//!
+//! ## Frame System
+//!
+//! With the above information in context, we can conclude that **`frame_system` is a special pallet
+//! that is tightly coupled with every other pallet**. This is because it provides the fundamental
+//! system functionality that every pallet needs, such as common types like
+//! [`frame::prelude::frame_system::Config::AccountId`],
+//! [`frame::prelude::frame_system::Config::Hash`], and some functionality such as block number,
+//! etc.
+//!
+//! ## Recap
+//!
+//! To recap, consider the following rules of thumb:
+//!
+//! * In all cases, try to break big pallets apart with clear boundaries of responsibility. In
+//!
general, it is easier to reason about multiple pallets if they only communicate together via a
+//!   known trait, rather than having access to all of each other's public items, such as storage and
+//!   dispatchables.
+//! * If a group of pallets is meant to work together, but is not foreseen to be generalized or
+//!   used by others, consider tightly coupling pallets, *if it simplifies the development*.
+//! * If a pallet needs a functionality provided by another pallet, but multiple implementations can
+//!   be foreseen, consider loosely coupling pallets.
+//!
+//! For example, all pallets in `polkadot-sdk` that needed to work with currencies could have been
+//! tightly coupled with [`pallet_balances`]. But, `polkadot-sdk` also provides [`pallet_assets`]
+//! (and more implementations by the community), therefore all pallets use traits to loosely couple
+//! with the balances or assets pallet. More on this in [`crate::reference_docs::frame_currency`].
+//!
+//! ## Further References
+//!
+//! -
+//! -
+//!
+//! [`AuthorProvider`]: crate::reference_docs::frame_pallet_coupling::AuthorProvider
+//! [`OtherAuthorProvider`]: crate::reference_docs::frame_pallet_coupling::OtherAuthorProvider
+
+#![allow(unused)]
+
+use frame::prelude::*;
+
+#[docify::export]
+#[frame::pallet]
+pub mod pallet_foo {
+	use super::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	impl<T: Config> Pallet<T> {
+		fn do_stuff_with_author() {
+			// needs block author here
+		}
+	}
+}
+
+#[docify::export]
+#[frame::pallet]
+pub mod pallet_author {
+	use super::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {}
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	impl<T: Config> Pallet<T> {
+		pub fn author() -> T::AccountId {
+			todo!("somehow has access to the block author and can return it here")
+		}
+	}
+}
+
+#[frame::pallet]
+pub mod pallet_foo_tight {
+	use super::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[docify::export(tight_config)]
+	/// This pallet can only live in a runtime that has both `frame_system` and `pallet_author`.
+	#[pallet::config]
+	pub trait Config: frame_system::Config + pallet_author::Config {}
+
+	#[docify::export(tight_usage)]
+	impl<T: Config> Pallet<T> {
+		// anywhere in `pallet-foo`, we can call into `pallet-author` directly, namely because
+		// `T: pallet_author::Config`
+		fn do_stuff_with_author() {
+			let _ = pallet_author::Pallet::<T>::author();
+		}
+	}
+}
+
+#[docify::export]
+/// Abstraction over "something that can provide the block author".
+pub trait AuthorProvider<AccountId> {
+	fn author() -> AccountId;
+}
+
+#[frame::pallet]
+pub mod pallet_foo_loose {
+	use super::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[docify::export(loose_config)]
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// This pallet relies on the existence of something that implements [`AuthorProvider`],
+		/// which may or may not be `pallet-author`.
+ type AuthorProvider: AuthorProvider; + } + + #[docify::export(loose_usage)] + impl Pallet { + fn do_stuff_with_author() { + let _ = T::AuthorProvider::author(); + } + } +} + +#[docify::export(pallet_author_provider)] +impl AuthorProvider for pallet_author::Pallet { + fn author() -> T::AccountId { + pallet_author::Pallet::::author() + } +} + +pub struct OtherAuthorProvider; + +#[docify::export(other_author_provider)] +impl AuthorProvider for OtherAuthorProvider { + fn author() -> AccountId { + todo!("somehow get the block author here") + } +} + +#[docify::export(unit_author_provider)] +impl AuthorProvider for () { + fn author() -> AccountId { + todo!("somehow get the block author here") + } +} + +pub mod runtime { + use super::*; + use cumulus_pallet_aura_ext::pallet; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo_loose, + PalletAuthor: pallet_author, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_author::Config for Runtime {} + + #[docify::export(runtime_author_provider)] + impl pallet_foo_loose::Config for Runtime { + type AuthorProvider = pallet_author::Pallet; + // which is also equivalent to + // type AuthorProvider = PalletAuthor; + } +} diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 3fd815d2a15..f4b52208e2f 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -103,3 +103,7 @@ pub mod light_nodes; /// Learn about the offchain workers, how they function, and how to use them, as provided by the /// [`frame`] APIs. pub mod frame_offchain_workers; + +/// Learn about the different ways through which multiple [`frame`] pallets can be combined to work +/// together. +pub mod frame_pallet_coupling; diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index b5b1ac09c60..6ea7fa33e77 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -1531,9 +1531,8 @@ pub mod pallet_macros { /// The attribute currently only supports enum definitions, and identifiers that are named /// `FreezeReason`, `HoldReason`, `LockId` or `SlashReason`. Arbitrary identifiers for the /// enum are not supported. The aggregate enum generated by - /// [`frame_support::construct_runtime`](frame_support::construct_runtime) will have the - /// name of `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeLockId` and - /// `RuntimeSlashReason` respectively. + /// [`frame_support::construct_runtime`] will have the name of `RuntimeFreezeReason`, + /// `RuntimeHoldReason`, `RuntimeLockId` and `RuntimeSlashReason` respectively. 
/// /// NOTE: The aggregate enum generated by `construct_runtime` generates a conversion /// function from the pallet enum to the aggregate enum, and automatically derives the -- GitLab From c89b3997df47408a324e8870693e80ddc4a42b7d Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 7 Mar 2024 12:47:30 +0100 Subject: [PATCH 59/86] Contracts: Remove changelog (#3605) This is not maintained, and we should be able to get information from the release changelog --- substrate/frame/contracts/CHANGELOG.md | 116 ------------------------- 1 file changed, 116 deletions(-) delete mode 100644 substrate/frame/contracts/CHANGELOG.md diff --git a/substrate/frame/contracts/CHANGELOG.md b/substrate/frame/contracts/CHANGELOG.md deleted file mode 100644 index aca94e5b149..00000000000 --- a/substrate/frame/contracts/CHANGELOG.md +++ /dev/null @@ -1,116 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -The semantic versioning guarantees cover the interface to the Substrate runtime which -includes this pallet as a dependency. This module will also add storage migrations whenever -changes require it. Stability with regard to offchain tooling is explicitly excluded from -this guarantee: For example adding a new field to an in-storage data structure will require -changes to frontends to properly display it. However, those changes will still be regarded -as a minor version bump. - -The interface provided to smart contracts will adhere to semver with one exception: Even -major version bumps will be backwards compatible with regard to already deployed contracts. -In other words: Upgrading this pallet will not break pre-existing contracts. - -## [Unreleased] - -### Added - -- Forbid calling back to contracts after switching to runtime -[#13443](https://github.com/paritytech/substrate/pull/13443) - -- Allow contracts to dispatch calls into the runtime (**unstable**) -[#9276](https://github.com/paritytech/substrate/pull/9276) - -- New version of `seal_call` that offers more features. -[#8909](https://github.com/paritytech/substrate/pull/8909) - -- New `instantiate` RPC that allows clients to dry-run contract instantiation. -[#8451](https://github.com/paritytech/substrate/pull/8451) - -- New version of `seal_random` which exposes additional information. -[#8329](https://github.com/paritytech/substrate/pull/8329) - -### Changed - -- Replaced storage rent with automatic storage deposits -[#9669](https://github.com/paritytech/substrate/pull/9669) -[#10082](https://github.com/paritytech/substrate/pull/10082) - -- Replaced `seal_println` with the `seal_debug_message` API which allows outputting debug -messages to the console and RPC clients. -[#8773](https://github.com/paritytech/substrate/pull/8773) -[#9550](https://github.com/paritytech/substrate/pull/9550) - -- Make storage and fields of `Schedule` private to the crate. -[#8359](https://github.com/paritytech/substrate/pull/8359) - -### Fixed - -- Remove pre-charging which caused wrongly estimated weights -[#8976](https://github.com/paritytech/substrate/pull/8976) - -## [v3.0.0] 2021-02-25 - -This version constitutes the first release that brings any stability guarantees (see above). - -### Added - -- Emit an event when a contract terminates (self-destructs). 
-[#8014](https://github.com/paritytech/substrate/pull/8014) - -- Charge rent for code stored on the chain in addition to the already existing -rent that is paid for data storage. -[#7935](https://github.com/paritytech/substrate/pull/7935) - -- Allow the runtime to configure per storage item costs in addition -to the already existing per byte costs. -[#7819](https://github.com/paritytech/substrate/pull/7819) - -- Contracts are now deleted lazily so that the user who removes a contract -does not need to pay for the deletion of the contract storage. -[#7740](https://github.com/paritytech/substrate/pull/7740) - -- Allow runtime authors to define chain extensions in order to provide custom -functionality to contracts. -[#7548](https://github.com/paritytech/substrate/pull/7548) -[#8003](https://github.com/paritytech/substrate/pull/8003) - -- Proper weights which are fully automated by benchmarking. -[#6715](https://github.com/paritytech/substrate/pull/6715) -[#7017](https://github.com/paritytech/substrate/pull/7017) -[#7361](https://github.com/paritytech/substrate/pull/7361) - -### Changed - -- Collect the rent for one block during instantiation. -[#7847](https://github.com/paritytech/substrate/pull/7847) - -- Instantiation takes a `salt` argument to allow for easier instantion of the -same code by the same sender. -[#7482](https://github.com/paritytech/substrate/pull/7482) - -- Improve the information returned by the `contracts_call` RPC. -[#7468](https://github.com/paritytech/substrate/pull/7468) - -- Simplify the node configuration necessary to add this module. -[#7409](https://github.com/paritytech/substrate/pull/7409) - -### Fixed - -- Consider the code size of a contract in the weight that is charged for -loading a contract from storage. -[#8086](https://github.com/paritytech/substrate/pull/8086) - -- Fix possible overflow in storage size calculation -[#7885](https://github.com/paritytech/substrate/pull/7885) - -- Cap the surcharge reward that can be claimed. -[#7870](https://github.com/paritytech/substrate/pull/7870) - -- Fix a possible DoS vector where contracts could allocate too large buffers. -[#7818](https://github.com/paritytech/substrate/pull/7818) -- GitLab From 6792d4b5b84e80ab771aa32666ce08dbcca39b97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 7 Mar 2024 20:10:12 +0800 Subject: [PATCH 60/86] Disable flaky test (#3602) Unfortunately, the flakiness wasn't fixed by https://github.com/paritytech/polkadot-sdk/pull/3595. Let's disable the test in the meanwhile since it is hanging on the CI a lot. 
--------- Co-authored-by: Liam Aharon --- substrate/client/consensus/manual-seal/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index f04c0d42d60..c3d360f0719 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -467,7 +467,10 @@ mod tests { assert_eq!(client.header(created_block.hash).unwrap().unwrap().number, 1) } - #[tokio::test] + // TODO: enable once the flakiness is fixed + // See https://github.com/paritytech/polkadot-sdk/issues/3603 + //#[tokio::test] + #[allow(unused)] async fn instant_seal_delayed_finalize() { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); -- GitLab From de9411aa4c29a28cb2c964cce8153bdaa1bdc7ea Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 7 Mar 2024 13:39:39 +0100 Subject: [PATCH 61/86] Contracts: Remove unstable on lock/unlock_delegate_dependency host fns (#3606) Oversight from PR https://github.com/paritytech/polkadot-sdk/pull/3384. These 2 functions should have been tagged as stable --- prdoc/pr_3606.prdoc | 9 +++++++++ substrate/frame/contracts/src/wasm/runtime.rs | 2 -- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_3606.prdoc diff --git a/prdoc/pr_3606.prdoc b/prdoc/pr_3606.prdoc new file mode 100644 index 00000000000..18b71de9477 --- /dev/null +++ b/prdoc/pr_3606.prdoc @@ -0,0 +1,9 @@ +title: "[pallet_contracts] mark lock/unlock_delegate_dependency as stable" + +doc: + - audience: Runtime Dev + description: | + Lock and unlock delegate dependency are stable now, so we can mark them as such. + +crates: + - name: pallet-contracts diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index f440c818166..402ff78dcde 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -2305,7 +2305,6 @@ pub mod env { /// Adds a new delegate dependency to the contract. /// See [`pallet_contracts_uapi::HostFn::lock_delegate_dependency`]. - #[unstable] fn lock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::LockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; @@ -2315,7 +2314,6 @@ pub mod env { /// Removes the delegate dependency from the contract. /// see [`pallet_contracts_uapi::HostFn::unlock_delegate_dependency`]. - #[unstable] fn unlock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; -- GitLab From 629506ce061db76d31d4f7a81f4a497752b27259 Mon Sep 17 00:00:00 2001 From: Muharem Date: Thu, 7 Mar 2024 16:13:17 +0100 Subject: [PATCH 62/86] Deprecate xcm::body::TREASURER_INDEX constant, use standard Treasury varian instead (#3608) Deprecate the `xcm::body::TREASURER_INDEX` constant and use the standard Treasury variant from the `xcm::BodyId` type instead. 
To align with the production runtimes: https://github.com/polkadot-fellows/runtimes/pull/149 --- .../collectives/collectives-westend/src/xcm_config.rs | 2 +- polkadot/runtime/westend/constants/src/lib.rs | 2 ++ polkadot/runtime/westend/src/xcm_config.rs | 6 ++---- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index cc25cbda0a4..87f637d5b59 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -59,7 +59,7 @@ parameter_types! { pub const GovernanceLocation: Location = Location::parent(); pub const FellowshipAdminBodyId: BodyId = BodyId::Index(xcm_constants::body::FELLOWSHIP_ADMIN_INDEX); pub AssetHub: Location = (Parent, Parachain(1000)).into(); - pub const TreasurerBodyId: BodyId = BodyId::Index(xcm_constants::body::TREASURER_INDEX); + pub const TreasurerBodyId: BodyId = BodyId::Treasury; pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { location: AssetHub::get(), diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 3bc27177d7a..c98f4b114fd 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -130,6 +130,8 @@ pub mod xcm { const ROOT_INDEX: u32 = 0; // The bodies corresponding to the Polkadot OpenGov Origins. pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + #[deprecated = "Will be removed after August 2024; Use `xcm::latest::BodyId::Treasury` \ + instead"] pub const TREASURER_INDEX: u32 = 2; } } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index c3aa75a86a4..c57af987ca7 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -34,9 +34,7 @@ use runtime_common::{ }; use sp_core::ConstU32; use westend_runtime_constants::{ - currency::CENTS, - system_parachain::*, - xcm::body::{FELLOWSHIP_ADMIN_INDEX, TREASURER_INDEX}, + currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, }; use xcm::latest::prelude::*; use xcm_builder::{ @@ -231,7 +229,7 @@ parameter_types! { // FellowshipAdmin pluralistic body. pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); // `Treasurer` pluralistic body. - pub const TreasurerBodyId: BodyId = BodyId::Index(TREASURER_INDEX); + pub const TreasurerBodyId: BodyId = BodyId::Treasury; } /// Type to convert the `GeneralAdmin` origin to a Plurality `Location` value. 
-- GitLab From f4fbddec420384e3550703ea7c9db3ea923f66e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Andr=C3=A9s=20Dorado=20Su=C3=A1rez?= Date: Thu, 7 Mar 2024 18:19:52 -0500 Subject: [PATCH 63/86] fix(pallet-benchmarking): split test functions in v2 (#3574) Closes #376 --------- Co-authored-by: command-bot <> --- .../collator-selection/src/benchmarking.rs | 4 +- .../collective-content/src/benchmarking.rs | 2 +- .../runtime/common/src/identity_migrator.rs | 2 +- .../parachains/src/hrmp/benchmarking.rs | 2 +- prdoc/pr_3574.prdoc | 23 +++++++++++ substrate/frame/alliance/src/benchmarking.rs | 2 +- substrate/frame/identity/src/benchmarking.rs | 4 +- substrate/frame/lottery/src/benchmarking.rs | 1 - .../frame/support/procedural/src/benchmark.rs | 38 ++++++++++++++++++- .../system/benchmarking/src/extensions.rs | 2 +- .../frame/system/benchmarking/src/lib.rs | 2 +- 11 files changed, 67 insertions(+), 15 deletions(-) create mode 100644 prdoc/pr_3574.prdoc diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 2c40f4dd0ea..e2af74a6e60 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -22,9 +22,7 @@ use super::*; #[allow(unused)] use crate::Pallet as CollatorSelection; use codec::Decode; -use frame_benchmarking::{ - account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, -}; +use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; diff --git a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs index 943386a8427..3d6bf073778 100644 --- a/cumulus/parachains/pallets/collective-content/src/benchmarking.rs +++ b/cumulus/parachains/pallets/collective-content/src/benchmarking.rs @@ -16,7 +16,7 @@ //! The pallet benchmarks. 
 use super::{Pallet as CollectiveContent, *};
-use frame_benchmarking::{impl_benchmark_test_suite, v2::*};
+use frame_benchmarking::v2::*;
 use frame_support::traits::EnsureOrigin;
 
 fn assert_last_event<T: Config<I>, I: 'static>(generic_event: <T as Config<I>>::RuntimeEvent) {
diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs
index 0dfb03b06ba..bf334a63e95 100644
--- a/polkadot/runtime/common/src/identity_migrator.rs
+++ b/polkadot/runtime/common/src/identity_migrator.rs
@@ -31,7 +31,7 @@ use pallet_identity;
 use sp_core::Get;
 
 #[cfg(feature = "runtime-benchmarks")]
-use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError};
+use frame_benchmarking::{account, v2::*, BenchmarkError};
 
 pub trait WeightInfo {
 	fn reap_identity(r: u32, s: u32) -> Weight;
diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
index 2cb49c88d43..c6baf2f30cf 100644
--- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs
@@ -22,7 +22,7 @@ use crate::{
 	paras::{Pallet as Paras, ParaKind, ParachainsCache},
 	shared::Pallet as Shared,
 };
-use frame_benchmarking::{impl_benchmark_test_suite, v2::*, whitelisted_caller};
+use frame_benchmarking::{v2::*, whitelisted_caller};
 use frame_support::{assert_ok, traits::Currency};
 
 type BalanceOf =
diff --git a/prdoc/pr_3574.prdoc b/prdoc/pr_3574.prdoc
new file mode 100644
index 00000000000..99868ea8a13
--- /dev/null
+++ b/prdoc/pr_3574.prdoc
@@ -0,0 +1,23 @@
+title: Generate test functions for each benchmark with benchmarking v2
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR fixes an issue where using the `impl_benchmark_test_suite` macro
+      within modules that use the benchmarking v2 macros (`#[benchmarks]`
+      and `#[instance_benchmarks]`) always produced a single test called
+      `test_benchmarks` instead of a separate benchmark test for every
+      benchmark (noted with the `#[benchmark]` macro).
+
+      By using this macro from now on, new tests will be created named
+      `test_benchmark_{name}` where `name` is the name of the benchmark
+      function. Those tests will be nested inside the module intended for
+      benchmark functions.
+
+      Also, when using `impl_benchmark_test_suite` inside the module,
+      the import of such macro will not be necessary, so any explicit
+      import of it will be marked as unused, the same way it works for
+      v1 macros so far.
+ +crates: + - name: frame-support-procedural diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 9fe0e29b42c..4ccd0fc08f6 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -26,7 +26,7 @@ use core::{ }; use sp_runtime::traits::{Bounded, Hash, StaticLookup}; -use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; +use frame_benchmarking::{account, v2::*, BenchmarkError}; use frame_support::traits::{EnsureOrigin, Get, UnfilteredDispatchable}; use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin as SystemOrigin}; diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index fe2fb0b0489..e0b45eeecd1 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -23,9 +23,7 @@ use super::*; use crate::Pallet as Identity; use codec::Encode; -use frame_benchmarking::{ - account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, -}; +use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::{ assert_ok, ensure, traits::{EnsureOrigin, Get, OnFinalize, OnInitialize}, diff --git a/substrate/frame/lottery/src/benchmarking.rs b/substrate/frame/lottery/src/benchmarking.rs index 1510d250dbe..123b425b976 100644 --- a/substrate/frame/lottery/src/benchmarking.rs +++ b/substrate/frame/lottery/src/benchmarking.rs @@ -23,7 +23,6 @@ use super::*; use crate::Pallet as Lottery; use frame_benchmarking::{ - impl_benchmark_test_suite, v1::{account, whitelisted_caller, BenchmarkError}, v2::*, }; diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index 6ded82d91aa..d16ad2aaa51 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -431,7 +431,7 @@ pub fn benchmarks( let mut benchmarks_by_name_mappings: Vec = Vec::new(); let test_idents: Vec = benchmark_names_str .iter() - .map(|n| Ident::new(format!("test_{}", n).as_str(), Span::call_site())) + .map(|n| Ident::new(format!("test_benchmark_{}", n).as_str(), Span::call_site())) .collect(); for i in 0..benchmark_names.len() { let name_ident = &benchmark_names[i]; @@ -441,6 +441,37 @@ pub fn benchmarks( benchmarks_by_name_mappings.push(quote!(#name_str => Self::#test_ident())) } + let impl_test_function = content + .iter_mut() + .find_map(|item| { + let Item::Macro(item_macro) = item else { + return None; + }; + + if !item_macro + .mac + .path + .segments + .iter() + .any(|s| s.ident == "impl_benchmark_test_suite") + { + return None; + } + + let tokens = item_macro.mac.tokens.clone(); + *item = Item::Verbatim(quote! {}); + + Some(quote! { + impl_test_function!( + (#( {} #benchmark_names )*) + (#( #extra_benchmark_names )*) + (#( #skip_meta_benchmark_names )*) + #tokens + ); + }) + }) + .unwrap_or(quote! {}); + // emit final quoted tokens let res = quote! 
{ #(#mod_attrs) @@ -676,6 +707,8 @@ pub fn benchmarks( } } } + + #impl_test_function } #mod_vis use #mod_name::*; }; @@ -733,7 +766,8 @@ fn expand_benchmark( let setup_stmts = benchmark_def.setup_stmts; let verify_stmts = benchmark_def.verify_stmts; let last_stmt = benchmark_def.last_stmt; - let test_ident = Ident::new(format!("test_{}", name.to_string()).as_str(), Span::call_site()); + let test_ident = + Ident::new(format!("test_benchmark_{}", name.to_string()).as_str(), Span::call_site()); // unroll params (prepare for quoting) let unrolled = UnrolledParams::from(&benchmark_def.params); diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs index 1721501dead..bfc3b4343a6 100644 --- a/substrate/frame/system/benchmarking/src/extensions.rs +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -19,7 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; +use frame_benchmarking::{account, v2::*, BenchmarkError}; use frame_support::{ dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, weights::Weight, diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index ebd16275bcb..c9f0869d3bc 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -21,7 +21,7 @@ #![cfg(feature = "runtime-benchmarks")] use codec::Encode; -use frame_benchmarking::{impl_benchmark_test_suite, v2::*}; +use frame_benchmarking::v2::*; use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; -- GitLab From 2aa006e094e248110af14a742d4e2f56b7931959 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 8 Mar 2024 00:31:59 +0100 Subject: [PATCH 64/86] Refactor polkadot-parachain service for more code reuse (#3511) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a refactoring, no changes to the logic are included (if you find some, report :D). ## Change Overview In https://github.com/paritytech/polkadot-sdk/issues/2455, dependency on actual runtimes was removed for the system parachains. This means that the trait bounds we have on various `start_node_xy` do not check anything anymore, since they all point to the same runtime. Exception is asset-hub-polkadot which uses a different key type. This PR unifies the different nodes as much as possible. `start_node_impl` is doing the heavy lifting and has been made a bit more flexible to support the rpc extension instantiation that was there before. The generics for `Runtime` and `AuraId` have been removed where possible. The fake runtime is imported as `FakeRuntime` to make it very clear to readers that it is not the generic parameter. Multiple nodes where using the same import queue/start_consensus closure, they have been unified. 
--------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Francisco Aguirre Co-authored-by: Adrian Catangiu Co-authored-by: Egor_P Co-authored-by: Dmitry Markin Co-authored-by: Xiliang Chen Co-authored-by: Andrei Eres Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Alin Dima Co-authored-by: gupnik Co-authored-by: Liam Aharon Co-authored-by: DΓ³nal Murray --- cumulus/polkadot-parachain/src/command.rs | 84 +- cumulus/polkadot-parachain/src/service.rs | 2329 ++++++--------------- 2 files changed, 717 insertions(+), 1696 deletions(-) diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 4d44879af51..9ba7b7876b3 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -399,7 +399,7 @@ macro_rules! construct_partials { Runtime::AssetHubPolkadot => { let $partials = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, )?; $code }, @@ -413,28 +413,21 @@ macro_rules! construct_partials { Runtime::People(_) => { let $partials = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AuraId>, )?; $code }, Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { let $partials = new_partial::( &$config, - crate::service::shell_build_import_queue, + crate::service::build_shell_import_queue, )?; $code }, - Runtime::ContractsRococo => { + Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { let $partials = new_partial::( &$config, - crate::service::contracts_rococo_build_import_queue, - )?; - $code - }, - Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( - &$config, - crate::service::rococo_parachain_build_import_queue, + crate::service::build_aura_import_queue, )?; $code }, @@ -450,7 +443,7 @@ macro_rules! construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -467,7 +460,7 @@ macro_rules! construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::aura_build_import_queue::<_, AuraId>, + crate::service::build_relay_to_aura_import_queue::<_, AuraId>, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -480,30 +473,20 @@ macro_rules! 
construct_async_run { runner.async_run(|$config| { let $components = new_partial::( &$config, - crate::service::shell_build_import_queue, + crate::service::build_shell_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) }) } - Runtime::ContractsRococo => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::contracts_rococo_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Penpal(_) | Runtime::Default => { + Runtime::ContractsRococo | Runtime::Penpal(_) | Runtime::Default => { runner.async_run(|$config| { let $components = new_partial::< RuntimeApi, _, >( &$config, - crate::service::rococo_parachain_build_import_queue, + crate::service::build_aura_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) @@ -715,25 +698,19 @@ pub fn run() -> Result<()> { .map_err(Into::into), CollectivesPolkadot => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), CollectivesWestend => - crate::service::start_generic_aura_lookahead_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Seedling | Shell => - crate::service::start_shell_node::( + crate::service::start_shell_node( config, polkadot_config, collator_options, @@ -758,36 +735,24 @@ pub fn run() -> Result<()> { BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal => - crate::service::start_generic_aura_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => - crate::service::start_generic_aura_lookahead_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => - crate::service::start_generic_aura_lookahead_node::< - RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) + crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), } @@ -800,10 +765,7 
@@ -800,10 +765,7 @@ pub fn run() -> Result<()> {
 					chain_spec::coretime::CoretimeRuntimeType::Westend |
 					chain_spec::coretime::CoretimeRuntimeType::WestendLocal |
 					chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment =>
-						crate::service::start_generic_aura_lookahead_node::<
-							RuntimeApi,
-							AuraId,
-						>(config, polkadot_config, collator_options, id, hwbench)
+						crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 						.await
 						.map(|r| r.0),
 				}
@@ -822,10 +784,7 @@ pub fn run() -> Result<()> {
 					.map_err(Into::into),
 				Glutton | GluttonWestend =>
-					crate::service::start_basic_lookahead_node::<
-						RuntimeApi,
-						AuraId,
-					>(config, polkadot_config, collator_options, id, hwbench)
+					crate::service::start_basic_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 					.await
 					.map(|r| r.0)
 					.map_err(Into::into),
@@ -837,10 +796,7 @@ pub fn run() -> Result<()> {
 					chain_spec::people::PeopleRuntimeType::Westend |
 					chain_spec::people::PeopleRuntimeType::WestendLocal |
 					chain_spec::people::PeopleRuntimeType::WestendDevelopment =>
-						crate::service::start_generic_aura_lookahead_node::<
-							RuntimeApi,
-							AuraId,
-						>(config, polkadot_config, collator_options, id, hwbench)
+						crate::service::start_generic_aura_lookahead_node(config, polkadot_config, collator_options, id, hwbench)
 						.await
 						.map(|r| r.0),
 				}
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 386551102e4..ddf595ca70c 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -36,12 +36,13 @@ use cumulus_primitives_core::{
 	ParaId,
 };
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
+use sc_rpc::DenyUnsafe;
 use sp_core::Pair;
 use jsonrpsee::RpcModule;
-use crate::{fake_runtime_api::aura::RuntimeApi, rpc};
-pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Nonce};
+use crate::{fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi, rpc};
+pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Header, Nonce};
 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
 use futures::{lock::Mutex, prelude::*};
@@ -85,141 +86,6 @@ type ParachainBackend = TFullBackend;
 type ParachainBlockImport = TParachainBlockImport>, ParachainBackend>;
-/// Native executor instance.
-pub struct ShellRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for ShellRuntimeExecutor {
-	type ExtendHostFunctions = ();
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		shell_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		shell_runtime::native_version()
-	}
-}
-
-/// Native Asset Hub Westend (Westmint) executor instance.
-pub struct AssetHubWestendExecutor;
-impl sc_executor::NativeExecutionDispatch for AssetHubWestendExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		asset_hub_westend_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		asset_hub_westend_runtime::native_version()
-	}
-}
-
-/// Native Westend Collectives executor instance.
-pub struct CollectivesWestendRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		collectives_westend_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		collectives_westend_runtime::native_version()
-	}
-}
-
-/// Native BridgeHubRococo executor instance.
-pub struct BridgeHubRococoRuntimeExecutor;
-impl sc_executor::NativeExecutionDispatch for BridgeHubRococoRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		bridge_hub_rococo_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		bridge_hub_rococo_runtime::native_version()
-	}
-}
-
-/// Native `CoretimeRococo` executor instance.
-pub struct CoretimeRococoRuntimeExecutor;
-impl sc_executor::NativeExecutionDispatch for CoretimeRococoRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		coretime_rococo_runtime::api::dispatch(method, data)
-	}
-	fn native_version() -> sc_executor::NativeVersion {
-		coretime_rococo_runtime::native_version()
-	}
-}
-
-/// Native `CoretimeWestend` executor instance.
-pub struct CoretimeWestendRuntimeExecutor;
-impl sc_executor::NativeExecutionDispatch for CoretimeWestendRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		coretime_westend_runtime::api::dispatch(method, data)
-	}
-	fn native_version() -> sc_executor::NativeVersion {
-		coretime_westend_runtime::native_version()
-	}
-}
-
-/// Native contracts executor instance.
-pub struct ContractsRococoRuntimeExecutor;
-impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		contracts_rococo_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		contracts_rococo_runtime::native_version()
-	}
-}
-
-/// Native Westend Glutton executor instance.
-pub struct GluttonWestendRuntimeExecutor;
-
-impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		glutton_westend_runtime::api::dispatch(method, data)
-	}
-
-	fn native_version() -> sc_executor::NativeVersion {
-		glutton_westend_runtime::native_version()
-	}
-}
-
-/// Native `PeopleWestend` executor instance.
-pub struct PeopleWestendRuntimeExecutor;
-impl sc_executor::NativeExecutionDispatch for PeopleWestendRuntimeExecutor {
-	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
-	fn dispatch(method: &str, data: &[u8]) -> Option> {
-		people_westend_runtime::api::dispatch(method, data)
-	}
-	fn native_version() -> sc_executor::NativeVersion {
-		people_westend_runtime::native_version()
-	}
-}
-
-/// Native `PeopleRococo` executor instance.
-pub struct PeopleRococoRuntimeExecutor; -impl sc_executor::NativeExecutionDispatch for PeopleRococoRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - fn dispatch(method: &str, data: &[u8]) -> Option> { - people_rococo_runtime::api::dispatch(method, data) - } - fn native_version() -> sc_executor::NativeVersion { - people_rococo_runtime::native_version() - } -} - /// Assembly of PartialComponents (enough to run chain ops subcommands) pub type Service = PartialComponents< ParachainClient, @@ -323,12 +189,11 @@ where }) } -/// Start a shell node with the given parachain `Configuration` and relay chain `Configuration`. +/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// -/// This is the actual implementation that is abstract over the executor and the runtime api for -/// shell nodes. +/// This is the actual implementation that is abstract over the executor and the runtime api. #[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_shell_node_impl( +async fn start_node_impl( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, @@ -347,8 +212,15 @@ where + sp_api::ApiExt + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, - RB: Fn(Arc>) -> Result, sc_service::Error> + + cumulus_primitives_core::CollectCollationInfo + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + frame_rpc_system::AccountNonceApi, + RB: Fn( + DenyUnsafe, + Arc>, + Arc, + Arc>>, + ) -> Result, sc_service::Error> + 'static, BIQ: FnOnce( Arc>, @@ -372,6 +244,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -383,7 +256,6 @@ where let backend = params.backend.clone(); let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( polkadot_config, ¶chain_config, @@ -415,8 +287,20 @@ where }) .await?; - let rpc_client = client.clone(); - let rpc_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone())); + let rpc_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |deny_unsafe, _| { + rpc_ext_builder( + deny_unsafe, + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + ) + }) + }; sc_service::spawn_tasks(sc_service::SpawnTasksParams { rpc_builder, @@ -493,6 +377,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -501,676 +386,179 @@ where Ok((task_manager, client)) } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( +/// Build the import queue for Aura-based runtimes. 
+pub fn build_aura_import_queue(
+	client: Arc>,
+	block_import: ParachainBlockImport,
+	config: &Configuration,
+	telemetry: Option,
+	task_manager: &TaskManager,
+) -> Result, sc_service::Error> {
+	let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+	cumulus_client_consensus_aura::import_queue::<
+		sp_consensus_aura::sr25519::AuthorityPair,
+		_,
+		_,
+		_,
+		_,
+		_,
+	>(cumulus_client_consensus_aura::ImportQueueParams {
+		block_import,
+		client,
+		create_inherent_data_providers: move |_, _| async move {
+			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+			let slot =
+				sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
+					*timestamp,
+					slot_duration,
+				);
+
+			Ok((slot, timestamp))
+		},
+		registry: config.prometheus_registry(),
+		spawner: &task_manager.spawn_essential_handle(),
+		telemetry,
+	})
+	.map_err(Into::into)
+}
+
+/// Start a rococo parachain node.
+pub async fn start_rococo_parachain_node(
 	parachain_config: Configuration,
 	polkadot_config: Configuration,
 	collator_options: CollatorOptions,
-	sybil_resistance_level: CollatorSybilResistance,
 	para_id: ParaId,
-	_rpc_ext_builder: RB,
-	build_import_queue: BIQ,
-	start_consensus: SC,
 	hwbench: Option,
-) -> sc_service::error::Result<(TaskManager, Arc>)>
+) -> sc_service::error::Result<(TaskManager, Arc>)> {
+	start_node_impl::(
+		parachain_config,
+		polkadot_config,
+		collator_options,
+		CollatorSybilResistance::Resistant, // Aura
+		para_id,
+		build_parachain_rpc_extensions::,
+		build_aura_import_queue,
+		start_lookahead_aura_consensus,
+		hwbench,
+	)
+	.await
+}
+
+/// Build the import queue for the shell runtime.
+pub fn build_shell_import_queue(
+	client: Arc>,
+	block_import: ParachainBlockImport,
+	config: &Configuration,
+	_: Option,
+	task_manager: &TaskManager,
+) -> Result, sc_service::Error> {
+	cumulus_client_consensus_relay_chain::import_queue(
+		client,
+		block_import,
+		|_, _| async { Ok(()) },
+		&task_manager.spawn_essential_handle(),
+		config.prometheus_registry(),
+	)
+	.map_err(Into::into)
+}
+
+fn build_parachain_rpc_extensions(
+	deny_unsafe: sc_rpc::DenyUnsafe,
+	client: Arc>,
+	backend: Arc,
+	pool: Arc>>,
+) -> Result, sc_service::Error>
 where
 	RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static,
 	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue
-		+ sp_api::Metadata
-		+ sp_session::SessionKeys
-		+ sp_api::ApiExt
-		+ sp_offchain::OffchainWorkerApi
 		+ sp_block_builder::BlockBuilder
-		+ cumulus_primitives_core::CollectCollationInfo
 		+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi
 		+ frame_rpc_system::AccountNonceApi,
-	RB: Fn(Arc>) -> Result, sc_service::Error>,
-	BIQ: FnOnce(
-		Arc>,
-		ParachainBlockImport,
-		&Configuration,
-		Option,
-		&TaskManager,
-	) -> Result, sc_service::Error>,
-	SC: FnOnce(
-		Arc>,
-		ParachainBlockImport,
-		Option<&Registry>,
-		Option,
-		&TaskManager,
-		Arc,
-		Arc>>,
-		Arc>,
-		KeystorePtr,
-		Duration,
-		ParaId,
-		CollatorPair,
-		OverseerHandle,
-		Arc>) + Send + Sync>,
-		Arc,
-	) -> Result<(), sc_service::Error>,
 {
-	let parachain_config = prepare_node_config(parachain_config);
+	let deps = rpc::FullDeps { client, pool, deny_unsafe };
-	let params = new_partial::(&parachain_config, build_import_queue)?;
-	let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+	rpc::create_full(deps, backend).map_err(Into::into)
+}
-	let client = params.client.clone();
-	let backend = params.backend.clone();
+fn build_contracts_rpc_extensions(
+	deny_unsafe: sc_rpc::DenyUnsafe,
+	client: Arc>,
+	_backend: Arc,
+	pool: Arc>>,
+) -> Result, sc_service::Error> {
+	let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };
-	let mut task_manager = params.task_manager;
-	let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+	crate::rpc::create_contracts_rococo(deps).map_err(Into::into)
+}
+
+/// Start a polkadot-shell parachain node.
+pub async fn start_shell_node(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	collator_options: CollatorOptions,
+	para_id: ParaId,
+	hwbench: Option,
+) -> sc_service::error::Result<(TaskManager, Arc>)> {
+	start_node_impl::(
+		parachain_config,
 		polkadot_config,
-		&parachain_config,
-		telemetry_worker_handle,
-		&mut task_manager,
-		collator_options.clone(),
-		hwbench.clone(),
+		collator_options,
+		CollatorSybilResistance::Unresistant, // free-for-all consensus
+		para_id,
+		|_, _, _, _| Ok(RpcModule::new(())),
+		build_shell_import_queue,
+		start_relay_chain_consensus,
+		hwbench,
 	)
 	.await
-	.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
-
-	let validator = parachain_config.role.is_authority();
-	let prometheus_registry = parachain_config.prometheus_registry().cloned();
-	let transaction_pool = params.transaction_pool.clone();
-	let import_queue_service = params.import_queue.service();
-	let net_config = FullNetworkConfiguration::new(&parachain_config.network);
-
-	let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
-		build_network(BuildNetworkParams {
-			parachain_config: &parachain_config,
-			net_config,
-			client: client.clone(),
-			transaction_pool: transaction_pool.clone(),
-			para_id,
-			spawn_handle: task_manager.spawn_handle(),
-			relay_chain_interface: relay_chain_interface.clone(),
-			import_queue: params.import_queue,
-			sybil_resistance_level,
-		})
-		.await?;
+}
-	let rpc_builder = {
-		let client = client.clone();
-		let transaction_pool = transaction_pool.clone();
+enum BuildOnAccess {
+	Uninitialized(Option R + Send + Sync>>),
+	Initialized(R),
+}
-		let backend_for_rpc = backend.clone();
-		Box::new(move |deny_unsafe, _| {
-			let deps = rpc::FullDeps {
-				client: client.clone(),
-				pool: transaction_pool.clone(),
-				deny_unsafe,
-			};
+impl BuildOnAccess {
+	fn get_mut(&mut self) -> &mut R {
+		loop {
+			match self {
+				Self::Uninitialized(f) => {
+					*self = Self::Initialized((f.take().unwrap())());
+				},
+				Self::Initialized(ref mut r) => return r,
+			}
+		}
+	}
+}
-			rpc::create_full(deps, backend_for_rpc.clone()).map_err(Into::into)
-		})
-	};
+/// Special [`ParachainConsensus`] implementation that waits for the upgrade from
+/// shell to a parachain runtime that implements Aura.
+struct WaitForAuraConsensus { + client: Arc, + aura_consensus: Arc>>>>, + relay_chain_consensus: Arc>>>, + _phantom: PhantomData, +} - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); +impl Clone for WaitForAuraConsensus { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + aura_consensus: self.aura_consensus.clone(), + relay_chain_consensus: self.relay_chain_consensus.clone(), + _phantom: PhantomData, } } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) } -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -/// -/// This node is basic in the sense that it doesn't support functionality like transaction -/// payment. Intended to replace start_shell_node in use for glutton, shell, and seedling. 
-#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_basic_lookahead_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, - para_id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +#[async_trait::async_trait] +impl ParachainConsensus for WaitForAuraConsensus where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + frame_rpc_system::AccountNonceApi, - RB: Fn(Arc>) -> Result, sc_service::Error> - + 'static, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, -{ - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_client = client.clone(); - let rpc_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone())); - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - 
sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), - para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; - - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Build the import queue for the rococo parachain runtime. -pub fn rococo_parachain_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} - -/// Start a rococo parachain node. 
-pub async fn start_rococo_parachain_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - rococo_parachain_build_import_queue, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; - - let fut = aura::run::< - Block, - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - _, - _, - _, - >(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Build the import queue for the shell runtime. -pub fn shell_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - _: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, -{ - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) -} - -/// Start a polkadot-shell parachain node. 
-pub async fn start_shell_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, -{ - start_shell_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Unresistant, // free-for-all consensus - para_id, - |_| Ok(RpcModule::new(())), - shell_build_import_queue, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - _sync_oracle, - _keystore, - _relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = - cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( - relay_parent, - &relay_chain_interface, - &validation_data, - para_id, - ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } - }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) - }, - hwbench, - ) - .await -} - -enum BuildOnAccess { - Uninitialized(Option R + Send + Sync>>), - Initialized(R), -} - -impl BuildOnAccess { - fn get_mut(&mut self) -> &mut R { - loop { - match self { - Self::Uninitialized(f) => { - *self = Self::Initialized((f.take().unwrap())()); - }, - Self::Initialized(ref mut r) => return r, - } - } - } -} - -/// Special [`ParachainConsensus`] implementation that waits for the upgrade from -/// shell to a parachain runtime that implements Aura. 
-struct WaitForAuraConsensus { - client: Arc, - aura_consensus: Arc>>>>, - relay_chain_consensus: Arc>>>, - _phantom: PhantomData, -} - -impl Clone for WaitForAuraConsensus { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - aura_consensus: self.aura_consensus.clone(), - relay_chain_consensus: self.relay_chain_consensus.clone(), - _phantom: PhantomData, - } - } -} - -#[async_trait::async_trait] -impl ParachainConsensus for WaitForAuraConsensus -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Codec + Sync, + Client: sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: AuraApi, + AuraId: Send + Codec + Sync, { async fn produce_candidate( &mut self, @@ -1190,464 +578,56 @@ where .get_mut() .produce_candidate(parent, relay_parent, validation_data) .await - } else { - self.relay_chain_consensus - .lock() - .await - .produce_candidate(parent, relay_parent, validation_data) - .await - } - } -} - -struct Verifier { - client: Arc, - aura_verifier: BuildOnAccess>>, - relay_chain_verifier: Box>, - _phantom: PhantomData, -} - -#[async_trait::async_trait] -impl VerifierT for Verifier -where - Client: sp_api::ProvideRuntimeApi + Send + Sync, - Client::Api: AuraApi, - AuraId: Send + Sync + Codec, -{ - async fn verify( - &mut self, - block_import: BlockImportParams, - ) -> Result, String> { - if self - .client - .runtime_api() - .has_api::>(*block_import.header.parent_hash()) - .unwrap_or(false) - { - self.aura_verifier.get_mut().verify(block_import).await - } else { - self.relay_chain_verifier.verify(block_import).await - } - } -} - -/// Build the import queue for Aura-based runtimes. -pub fn aura_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry_handle: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - let verifier_client = client.clone(); - - let aura_verifier = move || { - Box::new(cumulus_client_consensus_aura::build_verifier::< - ::Pair, - _, - _, - _, - >(cumulus_client_consensus_aura::BuildVerifierParams { - client: verifier_client.clone(), - create_inherent_data_providers: move |parent_hash, _| { - let cidp_client = verifier_client.clone(); - async move { - let slot_duration = cumulus_client_consensus_aura::slot_duration_at( - &*cidp_client, - parent_hash, - )?; - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - } - }, - telemetry: telemetry_handle, - })) as Box<_> - }; - - let relay_chain_verifier = - Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; - - let verifier = Verifier { - client, - relay_chain_verifier, - aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), - _phantom: PhantomData, - }; - - let registry = config.prometheus_registry(); - let spawner = task_manager.spawn_essential_handle(); - - Ok(BasicQueue::new(verifier, Box::new(block_import), None, 
&spawner, registry)) -} - -/// Start an aura powered parachain node. Some system chains use this. -pub async fn start_generic_aura_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - slot_duration, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, - }; - - let fut = - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Uses the lookahead collator to support async backing. -/// -/// Start an aura powered parachain node. Some system chains use this. 
-pub async fn start_generic_aura_lookahead_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; - - let fut = - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); - - Ok(()) - }, - hwbench, - ) - .await -} - -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. 
-pub async fn start_asset_hub_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - _backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. - let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; - - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } - } - - // Move to Aura consensus. - let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) { - Ok(d) => d, - Err(e) => { - log::error!("Could not get Aura slot duration: {e}"); - return - }, - }; - - let proposer = Proposer::new(proposer_factory); - - let params = BasicAuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client, - relay_client: relay_chain_interface2, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - slot_duration, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. 
- authoring_duration: Duration::from_millis(500), - collation_request_receiver: Some(request_stream), - }; - - basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) - .await - }); + } else { + self.relay_chain_consensus + .lock() + .await + .produce_candidate(parent, relay_parent, validation_data) + .await + } + } +} - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); +struct Verifier { + client: Arc, + aura_verifier: BuildOnAccess>>, + relay_chain_verifier: Box>, + _phantom: PhantomData, +} - Ok(()) - }, - hwbench, - ) - .await +#[async_trait::async_trait] +impl VerifierT for Verifier +where + Client: sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: AuraApi, + AuraId: Send + Sync + Codec, +{ + async fn verify( + &mut self, + block_import: BlockImportParams, + ) -> Result, String> { + if self + .client + .runtime_api() + .has_api::>(*block_import.header.parent_hash()) + .unwrap_or(false) + { + self.aura_verifier.get_mut().verify(block_import).await + } else { + self.relay_chain_verifier.verify(block_import).await + } + } } -/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub -/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and -/// needs to sync and upgrade before it can run `AuraApi` functions. -/// -/// Uses the lookahead collator to support async backing. -#[sc_tracing::logging::prefix_logs_with("Parachain")] -pub async fn start_asset_hub_lookahead_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> +/// Build the import queue for parachain runtimes that started with relay chain consensus and +/// switched to aura. 
+pub fn build_relay_to_aura_import_queue( + client: Arc>, + block_import: ParachainBlockImport, + config: &Configuration, + telemetry_handle: Option, + task_manager: &TaskManager, +) -> Result, sc_service::Error> where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue @@ -1656,162 +636,74 @@ where + sp_api::ApiExt + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, + + sp_consensus_aura::AuraApi::Pair as Pair>::Public>, <::Pair as Pair>::Signature: TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - CollatorSybilResistance::Resistant, // Aura - para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, - |client, - block_import, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - relay_chain_slot_duration, - para_id, - collator_key, - overseer_handle, - announce_block, - backend| { - let relay_chain_interface2 = relay_chain_interface.clone(); - - let collator_service = CollatorService::new( - client.clone(), - Arc::new(task_manager.spawn_handle()), - announce_block, - client.clone(), - ); - - let spawner = task_manager.spawn_handle(); + let verifier_client = client.clone(); - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - spawner, - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); + let aura_verifier = move || { + Box::new(cumulus_client_consensus_aura::build_verifier::< + ::Pair, + _, + _, + _, + >(cumulus_client_consensus_aura::BuildVerifierParams { + client: verifier_client.clone(), + create_inherent_data_providers: move |parent_hash, _| { + let cidp_client = verifier_client.clone(); + async move { + let slot_duration = cumulus_client_consensus_aura::slot_duration_at( + &*cidp_client, + parent_hash, + )?; + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let collation_future = Box::pin(async move { - // Start collating with the `shell` runtime while waiting for an upgrade to an Aura - // compatible runtime. - let mut request_stream = cumulus_client_collator::relay_chain_driven::init( - collator_key.clone(), - para_id, - overseer_handle.clone(), - ) - .await; - while let Some(request) = request_stream.next().await { - let pvd = request.persisted_validation_data().clone(); - let last_head_hash = - match ::Header::decode(&mut &pvd.parent_head.0[..]) { - Ok(header) => header.hash(), - Err(e) => { - log::error!("Could not decode the head data: {e}"); - request.complete(None); - continue - }, - }; + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); - // Check if we have upgraded to an Aura compatible runtime and transition if - // necessary. - if client - .runtime_api() - .has_api::>(last_head_hash) - .unwrap_or(false) - { - // Respond to this request before transitioning to Aura. - request.complete(None); - break - } + Ok((slot, timestamp)) } + }, + telemetry: telemetry_handle, + })) as Box<_> + }; - // Move to Aura consensus. 
- let proposer = Proposer::new(proposer_factory); - - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend, - relay_client: relay_chain_interface2, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: true, /* we need to always re-initialize for asset-hub moving - * to aura */ - }; + let relay_chain_verifier = + Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>; - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) - .await - }); + let verifier = Verifier { + client, + relay_chain_verifier, + aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))), + _phantom: PhantomData, + }; - let spawner = task_manager.spawn_essential_handle(); - spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); + let registry = config.prometheus_registry(); + let spawner = task_manager.spawn_essential_handle(); - Ok(()) - }, - hwbench, - ) - .await + Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry)) } -/// Start an aura powered parachain node which uses the lookahead collator to support async backing. -/// This node is basic in the sense that its runtime api doesn't include common contents such as -/// transaction payment. Used for aura glutton. -pub async fn start_basic_lookahead_node( +/// Start an aura powered parachain node. Some system chains use this. +pub async fn start_generic_aura_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> -where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt - + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo - + sp_consensus_aura::AuraApi::Pair as Pair>::Public> - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - <::Pair as Pair>::Signature: - TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, -{ - start_basic_lookahead_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( parachain_config, polkadot_config, collator_options, CollatorSybilResistance::Resistant, // Aura para_id, - |_| Ok(RpcModule::new(())), - aura_build_import_queue::<_, AuraId>, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, |client, block_import, prometheus_registry, @@ -1826,7 +718,9 @@ where collator_key, overseer_handle, announce_block, - backend| { + _backend| { + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), @@ -1843,29 +737,27 @@ where client.clone(), ); - let params = AuraParams { + let params = BasicAuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client.clone(), - para_backend: backend.clone(), + 
para_client: client, relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, sync_oracle, keystore, collator_key, para_id, overseer_handle, + slot_duration, relay_chain_slot_duration, proposer, collator_service, - authoring_duration: Duration::from_millis(1500), - reinitialize: false, + // Very limited proposal time. + authoring_duration: Duration::from_millis(500), + collation_request_receiver: None, }; let fut = - aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + basic_aura::run::::Pair, _, _, _, _, _, _, _>(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); Ok(()) @@ -1875,16 +767,38 @@ where .await } -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_contracts_rococo_node_impl( +/// Uses the lookahead collator to support async backing. +/// +/// Start an aura powered parachain node. Some system chains use this. +pub async fn start_generic_aura_lookahead_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + +/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub +/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and +/// needs to sync and upgrade before it can run `AuraApi` functions. 
+pub async fn start_asset_hub_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, - sybil_resistance_level: CollatorSybilResistance, para_id: ParaId, - _rpc_ext_builder: RB, - build_import_queue: BIQ, - start_consensus: SC, hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc>)> where @@ -1896,228 +810,169 @@ where + sp_offchain::OffchainWorkerApi + sp_block_builder::BlockBuilder + cumulus_primitives_core::CollectCollationInfo + + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi - + frame_rpc_system::AccountNonceApi - + cumulus_primitives_aura::AuraUnincludedSegmentApi, - RB: Fn(Arc>) -> Result, sc_service::Error>, - BIQ: FnOnce( - Arc>, - ParachainBlockImport, - &Configuration, - Option, - &TaskManager, - ) -> Result, sc_service::Error>, - SC: FnOnce( - Arc>, - ParachainBlockImport, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc>>, - Arc>, - KeystorePtr, - Duration, - ParaId, - CollatorPair, - OverseerHandle, - Arc>) + Send + Sync>, - Arc, - ) -> Result<(), sc_service::Error>, + + frame_rpc_system::AccountNonceApi, + <::Pair as Pair>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, { - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; - - let (relay_chain_interface, collator_key) = build_relay_chain_interface( + start_node_impl::( + parachain_config, polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - build_network(BuildNetworkParams { - parachain_config: ¶chain_config, - net_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - para_id, - spawn_handle: task_manager.spawn_handle(), - relay_chain_interface: relay_chain_interface.clone(), - import_queue: params.import_queue, - sybil_resistance_level, - }) - .await?; - - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); - - Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - deny_unsafe, - }; - - crate::rpc::create_contracts_rococo(deps).map_err(Into::into) - }) - }; - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - sync_service: sync_service.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - 
if validator { - warn_if_slow_hardware(&hwbench); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - start_relay_chain_tasks(StartRelayChainTasksParams { - client: client.clone(), - announce_block: announce_block.clone(), + collator_options, + CollatorSybilResistance::Resistant, // Aura para_id, - relay_chain_interface: relay_chain_interface.clone(), - task_manager: &mut task_manager, - da_recovery_profile: if validator { - DARecoveryProfile::Collator - } else { - DARecoveryProfile::FullNode - }, - import_queue: import_queue_service, - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service: sync_service.clone(), - })?; + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, + |client, + block_import, + prometheus_registry, + telemetry, + task_manager, + relay_chain_interface, + transaction_pool, + sync_oracle, + keystore, + relay_chain_slot_duration, + para_id, + collator_key, + overseer_handle, + announce_block, + _backend| { + let relay_chain_interface2 = relay_chain_interface.clone(); - if validator { - start_consensus( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - relay_chain_slot_duration, - para_id, - collator_key.expect("Command line arguments do not allow this. qed"), - overseer_handle, - announce_block, - backend.clone(), - )?; - } + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); - start_network.start_network(); + let spawner = task_manager.spawn_handle(); - Ok((task_manager, client)) -} + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + spawner, + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); -#[allow(clippy::type_complexity)] -pub fn contracts_rococo_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result, sc_service::Error> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + let collation_future = Box::pin(async move { + // Start collating with the `shell` runtime while waiting for an upgrade to an Aura + // compatible runtime. 
+ let mut request_stream = cumulus_client_collator::relay_chain_driven::init( + collator_key.clone(), + para_id, + overseer_handle.clone(), + ) + .await; + while let Some(request) = request_stream.next().await { + let pvd = request.persisted_validation_data().clone(); + let last_head_hash = + match ::Header::decode(&mut &pvd.parent_head.0[..]) { + Ok(header) => header.hash(), + Err(e) => { + log::error!("Could not decode the head data: {e}"); + request.complete(None); + continue + }, + }; - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import, - client, - create_inherent_data_providers: move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + // Check if we have upgraded to an Aura compatible runtime and transition if + // necessary. + if client + .runtime_api() + .has_api::>(last_head_hash) + .unwrap_or(false) + { + // Respond to this request before transitioning to Aura. + request.complete(None); + break + } + } - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, + // Move to Aura consensus. + let slot_duration = match cumulus_client_consensus_aura::slot_duration(&*client) { + Ok(d) => d, + Err(e) => { + log::error!("Could not get Aura slot duration: {e}"); + return + }, + }; + + let proposer = Proposer::new(proposer_factory); + + let params = BasicAuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client, + relay_client: relay_chain_interface2, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, slot_duration, - ); + relay_chain_slot_duration, + proposer, + collator_service, + // Very limited proposal time. + authoring_duration: Duration::from_millis(500), + collation_request_receiver: Some(request_stream), + }; - Ok((slot, timestamp)) + basic_aura::run::::Pair, _, _, _, _, _, _, _>(params) + .await + }); + + let spawner = task_manager.spawn_essential_handle(); + spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); + + Ok(()) }, - registry: config.prometheus_registry(), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) + hwbench, + ) + .await } -/// Start a parachain node. -pub async fn start_contracts_rococo_node( +/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub +/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and +/// needs to sync and upgrade before it can run `AuraApi` functions. +/// +/// Uses the lookahead collator to support async backing. 
+#[sc_tracing::logging::prefix_logs_with("Parachain")] +pub async fn start_asset_hub_lookahead_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc>)> { - start_contracts_rococo_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> +where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_api::Metadata + + sp_session::SessionKeys + + sp_api::ApiExt + + sp_offchain::OffchainWorkerApi + + sp_block_builder::BlockBuilder + + cumulus_primitives_core::CollectCollationInfo + + sp_consensus_aura::AuraApi::Pair as Pair>::Public> + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + frame_rpc_system::AccountNonceApi + + cumulus_primitives_aura::AuraUnincludedSegmentApi, + <::Pair as Pair>::Signature: + TryFrom> + std::hash::Hash + sp_runtime::traits::Member + Codec, +{ + start_node_impl::( parachain_config, polkadot_config, collator_options, CollatorSybilResistance::Resistant, // Aura para_id, - |_| Ok(RpcModule::new(())), - contracts_rococo_build_import_queue, + build_parachain_rpc_extensions::, + build_relay_to_aura_import_queue::<_, AuraId>, |client, block_import, prometheus_registry, @@ -2133,14 +988,7 @@ pub async fn start_contracts_rococo_node( overseer_handle, announce_block, backend| { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - let proposer = Proposer::new(proposer_factory); + let relay_chain_interface2 = relay_chain_interface.clone(); let collator_service = CollatorService::new( client.clone(), @@ -2149,42 +997,81 @@ pub async fn start_contracts_rococo_node( client.clone(), ); - let params = AuraParams { - create_inherent_data_providers: move |_, ()| async move { Ok(()) }, - block_import, - para_client: client.clone(), - para_backend: backend.clone(), - relay_client: relay_chain_interface, - code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) - }, - sync_oracle, - keystore, - collator_key, - para_id, - overseer_handle, - relay_chain_slot_duration, - proposer, - collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(1500), - reinitialize: false, - }; + let spawner = task_manager.spawn_handle(); - let fut = aura::run::< - Block, - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - _, - _, - _, - >(params); - task_manager.spawn_essential_handle().spawn("aura", None, fut); + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + spawner, + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collation_future = Box::pin(async move { + // Start collating with the `shell` runtime while waiting for an upgrade to an Aura + // compatible runtime. 
+ let mut request_stream = cumulus_client_collator::relay_chain_driven::init( + collator_key.clone(), + para_id, + overseer_handle.clone(), + ) + .await; + while let Some(request) = request_stream.next().await { + let pvd = request.persisted_validation_data().clone(); + let last_head_hash = + match ::Header::decode(&mut &pvd.parent_head.0[..]) { + Ok(header) => header.hash(), + Err(e) => { + log::error!("Could not decode the head data: {e}"); + request.complete(None); + continue + }, + }; + + // Check if we have upgraded to an Aura compatible runtime and transition if + // necessary. + if client + .runtime_api() + .has_api::>(last_head_hash) + .unwrap_or(false) + { + // Respond to this request before transitioning to Aura. + request.complete(None); + break + } + } + + // Move to Aura consensus. + let proposer = Proposer::new(proposer_factory); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface2, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer, + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: true, /* we need to always re-initialize for asset-hub moving + * to aura */ + }; + + aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params) + .await + }); + + let spawner = task_manager.spawn_essential_handle(); + spawner.spawn_essential("cumulus-asset-hub-collator", None, collation_future); Ok(()) }, @@ -2193,6 +1080,184 @@ pub async fn start_contracts_rococo_node( .await } +/// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain +/// decides what is backed and included. 
+fn start_relay_chain_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + _sync_oracle: Arc>, + _keystore: KeystorePtr, + _relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + _backend: Arc, +) -> Result<(), sc_service::Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry, + ); + + let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( + cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { + para_id, + proposer_factory, + block_import, + relay_chain_interface: relay_chain_interface.clone(), + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let relay_chain_interface = relay_chain_interface.clone(); + async move { + let parachain_inherent = + cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( + relay_parent, + &relay_chain_interface, + &validation_data, + para_id, + ).await; + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok(parachain_inherent) + } + }, + }, + ); + + let spawner = task_manager.spawn_handle(); + + // Required for free-for-all consensus + #[allow(deprecated)] + old_consensus::start_collator_sync(old_consensus::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block, + overseer_handle, + spawner, + key: collator_key, + parachain_consensus: free_for_all, + runtime_api: client.clone(), + }); + + Ok(()) +} + +/// Start consensus using the lookahead aura collator. 
+fn start_lookahead_aura_consensus( + client: Arc>, + block_import: ParachainBlockImport, + prometheus_registry: Option<&Registry>, + telemetry: Option, + task_manager: &TaskManager, + relay_chain_interface: Arc, + transaction_pool: Arc>>, + sync_oracle: Arc>, + keystore: KeystorePtr, + relay_chain_slot_duration: Duration, + para_id: ParaId, + collator_key: CollatorPair, + overseer_handle: OverseerHandle, + announce_block: Arc>) + Send + Sync>, + backend: Arc, +) -> Result<(), sc_service::Error> { + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let collator_service = CollatorService::new( + client.clone(), + Arc::new(task_manager.spawn_handle()), + announce_block, + client.clone(), + ); + + let params = AuraParams { + create_inherent_data_providers: move |_, ()| async move { Ok(()) }, + block_import, + para_client: client.clone(), + para_backend: backend, + relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, + sync_oracle, + keystore, + collator_key, + para_id, + overseer_handle, + relay_chain_slot_duration, + proposer: Proposer::new(proposer_factory), + collator_service, + authoring_duration: Duration::from_millis(1500), + reinitialize: false, + }; + + let fut = aura::run::::Pair, _, _, _, _, _, _, _, _, _>(params); + task_manager.spawn_essential_handle().spawn("aura", None, fut); + + Ok(()) +} + +/// Start an aura powered parachain node which uses the lookahead collator to support async backing. +/// This node is basic in the sense that its runtime api doesn't include common contents such as +/// transaction payment. Used for aura glutton. +pub async fn start_basic_lookahead_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + |_, _, _, _| Ok(RpcModule::new(())), + build_relay_to_aura_import_queue::<_, AuraId>, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + +/// Start a parachain node for Rococo Contracts. +pub async fn start_contracts_rococo_node( + parachain_config: Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + para_id: ParaId, + hwbench: Option, +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( + parachain_config, + polkadot_config, + collator_options, + CollatorSybilResistance::Resistant, // Aura + para_id, + build_contracts_rpc_extensions, + build_aura_import_queue, + start_lookahead_aura_consensus, + hwbench, + ) + .await +} + /// Checks that the hardware meets the requirements and print a warning otherwise. 
fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { // Polkadot para-chains should generally use these requirements to ensure that the relay-chain -- GitLab From f977c211573e65b9f89886e7211e9b3148ce7e05 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 8 Mar 2024 10:48:55 +0100 Subject: [PATCH 65/86] Contracts Bump ApiVersion and add test (#3619) ApiVersion should have been bumped with https://github.com/paritytech/polkadot-sdk/pull/3606 this does that and add a test so we don't forget to do that everytime --- substrate/frame/contracts/proc-macro/src/lib.rs | 5 +++++ substrate/frame/contracts/src/lib.rs | 11 ++++++++++- substrate/frame/contracts/src/wasm/mod.rs | 3 +++ substrate/frame/contracts/src/wasm/runtime.rs | 1 - 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index de961776c32..1794d09d5ad 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -497,9 +497,14 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 { fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { let impls = expand_impls(def); let docs = docs.then_some(expand_docs(def)).unwrap_or(TokenStream2::new()); + let stable_api_count = def.host_funcs.iter().filter(|f| f.is_stable).count(); quote! { pub struct Env; + + #[cfg(test)] + pub const STABLE_API_COUNT: usize = #stable_api_count; + #impls /// Documentation of the API (host functions) available to contracts. /// diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 0511bcf7f3f..de84d5220c5 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -222,10 +222,19 @@ pub struct Environment { pub struct ApiVersion(u16); impl Default for ApiVersion { fn default() -> Self { - Self(1) + Self(2) } } +#[test] +fn api_version_is_up_to_date() { + assert_eq!( + 109, + crate::wasm::STABLE_API_COUNT, + "Stable API count has changed. Bump the returned value of ApiVersion::default() and update the test." + ); +} + #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 23363780b14..af287a6f2df 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -21,6 +21,9 @@ mod prepare; mod runtime; +#[cfg(test)] +pub use runtime::STABLE_API_COUNT; + #[cfg(doc)] pub use crate::wasm::runtime::api_doc; diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 402ff78dcde..7d43d30ba58 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -987,7 +987,6 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { - /// Set the value at the given key in the contract storage. 
 	/// See [`pallet_contracts_uapi::HostFn::set_storage`]
 	#[prefixed_alias]
-- 
GitLab


From 6f3caac0517278f0fdd614545c9ef620200bc02f Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Fri, 8 Mar 2024 14:10:47 +0200
Subject: [PATCH 66/86] collator-protocol: Always stay connected to validators
 in backing group (#3544)

Looking at rococo-asset-hub https://github.com/paritytech/polkadot-sdk/issues/3519
there seem to be a lot of instances where collators did not advertise their
collations. While there are multiple problems there, one of them is that we are
connecting and disconnecting to our assigned validators every block, because on
reconnect_timeout every 4s we call connect_to_validators and that will produce
0 validators when all went well, so set_reserved_peers called from validator
discovery will disconnect all our peers. More details here:
https://github.com/paritytech/polkadot-sdk/issues/3519#issuecomment-1972667343

Now, this shouldn't be a problem, but it stacks with an existing bug in our
network stack where, if we disconnect from a peer, the peer might not notice it,
so it won't detect the reconnect either and it won't send us the necessary view
updates, so we won't advertise the collation to it. More details here:
https://github.com/paritytech/polkadot-sdk/issues/3519#issuecomment-1972958276

To avoid hitting this condition that often, let's keep the peers in the reserved
set for the entire duration we are allocated to a backing group. Backing group
sizes (1 rococo, 3 kusama, 5 polkadot) are really small, so this shouldn't lead
to that many connections. Additionally, the validators would disconnect us
anyway if we don't advertise anything for 4 blocks.

## TODO
- [x] More testing.
- [x] Confirm on rococo that this is improving the situation. (It doesn't, but
  only because other things are going wrong there).

---------

Signed-off-by: Alexandru Gheorghe
---
 .../src/collator_side/validators_buffer.rs | 38 +++++++++++++++----
 .../src/validator_side/mod.rs              |  2 +-
 2 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
index 5b88efc99d8..1533f2eda5a 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs
@@ -90,8 +90,7 @@ impl ValidatorGroupsBuffer {
 		}
 	}
 
-	/// Returns discovery ids of validators we have at least one advertised-but-not-fetched
-	/// collation for.
+	/// Returns discovery ids of validators we are assigned to in this backing group window.
pub fn validators_to_connect(&self) -> Vec { let validators_num = self.validators.len(); let bits = self @@ -99,11 +98,22 @@ impl ValidatorGroupsBuffer { .values() .fold(bitvec![0; validators_num], |acc, next| acc | next); - self.validators + let mut should_be_connected: Vec = self + .validators .iter() .enumerate() .filter_map(|(idx, authority_id)| bits[idx].then_some(authority_id.clone())) - .collect() + .collect(); + + if let Some(last_group) = self.group_infos.iter().last() { + for validator in self.validators.iter().rev().take(last_group.len) { + if !should_be_connected.contains(validator) { + should_be_connected.push(validator.clone()); + } + } + } + + should_be_connected } /// Note a new advertisement, marking that we want to be connected to validators @@ -279,7 +289,7 @@ mod tests { assert_eq!(buf.validators_to_connect(), validators[..2].to_vec()); buf.reset_validator_interest(hash_a, &validators[1]); - assert_eq!(buf.validators_to_connect(), vec![validators[0].clone()]); + assert_eq!(buf.validators_to_connect(), validators[0..2].to_vec()); buf.note_collation_advertised(hash_b, 0, GroupIndex(1), &validators[2..]); assert_eq!(buf.validators_to_connect(), validators[2..].to_vec()); @@ -287,7 +297,11 @@ mod tests { for validator in &validators[2..] { buf.reset_validator_interest(hash_b, validator); } - assert!(buf.validators_to_connect().is_empty()); + let mut expected = validators[2..].to_vec(); + expected.sort(); + let mut result = buf.validators_to_connect(); + result.sort(); + assert_eq!(result, expected); } #[test] @@ -320,10 +334,18 @@ mod tests { } buf.reset_validator_interest(hashes[1], &validators[0]); - assert_eq!(buf.validators_to_connect(), validators[..2].to_vec()); + let mut expected: Vec<_> = validators[..4].iter().cloned().collect(); + let mut result = buf.validators_to_connect(); + expected.sort(); + result.sort(); + assert_eq!(result, expected); buf.reset_validator_interest(hashes[0], &validators[0]); - assert_eq!(buf.validators_to_connect(), vec![validators[1].clone()]); + let mut expected: Vec<_> = validators[1..4].iter().cloned().collect(); + expected.sort(); + let mut result = buf.validators_to_connect(); + result.sort(); + assert_eq!(result, expected); buf.note_collation_advertised(hashes[3], 0, GroupIndex(1), &validators[2..4]); buf.note_collation_advertised( diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 48ad3c711a6..a1b93fff348 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -1883,7 +1883,7 @@ async fn disconnect_inactive_peers( ) { for (peer, peer_data) in peers { if peer_data.is_inactive(&eviction_policy) { - gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer"); + gum::trace!(target: LOG_TARGET, ?peer, "Disconnecting inactive peer"); disconnect_peer(sender, *peer).await; } } -- GitLab From a0c9a3d65adb63d83afdf5b29d2e555dea0fa35e Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 8 Mar 2024 17:36:09 +0100 Subject: [PATCH 67/86] benchmark: allow range trailing comma in RangeArgs (#3598) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rustfmt will add a trailing comma for longer expression, this change will make sure that the Range parameters can still be parsed. 
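To make the failure mode concrete, here is a minimal, self-contained sketch (assuming only the
`syn` crate; the `Range` type and explicit `Parse` impl below are illustrative stand-ins, since the
real `RangeArgs` in `benchmark.rs` derives its parser) of parsing a `<start, end>` range while
tolerating the trailing comma rustfmt inserts when it breaks the range over multiple lines:

```rust
use syn::{
    parse::{Parse, ParseStream},
    GenericArgument, Result, Token,
};

/// Illustrative stand-in for `RangeArgs`: parses `<start, end>` and accepts an
/// optional trailing comma, mirroring the `_trailing_comma: Option<Comma>`
/// field added by this patch.
struct Range {
    start: GenericArgument,
    end: GenericArgument,
}

impl Parse for Range {
    fn parse(input: ParseStream) -> Result<Self> {
        input.parse::<Token![<]>()?;
        let start = input.parse()?;
        input.parse::<Token![,]>()?;
        let end = input.parse()?;
        // The trailing comma rustfmt adds on multi-line ranges is optional.
        if input.peek(Token![,]) {
            input.parse::<Token![,]>()?;
        }
        input.parse::<Token![>]>()?;
        Ok(Range { start, end })
    }
}
```

With this shape, both a compact `<1, 100>` and the rustfmt-split form ending in `100,` before the
closing `>` parse successfully.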
--------- Co-authored-by: command-bot <> Co-authored-by: Alexander Theißen --- substrate/frame/support/procedural/src/benchmark.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index d16ad2aaa51..27c75a7f054 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -65,6 +65,7 @@ struct RangeArgs { start: syn::GenericArgument, _comma: Comma, end: syn::GenericArgument, + _trailing_comma: Option, _gt_token: Gt, } -- GitLab From 1fe3c5f23b9c057a2ff1926daf4ecfd5b022da45 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 8 Mar 2024 19:13:31 +0100 Subject: [PATCH 68/86] Contracts: Fix terminate benchmark (#3558) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexander Theißen Co-authored-by: command-bot <> --- substrate/frame/contracts/src/benchmarking/mod.rs | 3 ++- substrate/frame/contracts/src/storage.rs | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 2ecd56b412d..794777ef05c 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -927,7 +927,7 @@ benchmarks! { value: code_hashes_bytes, }, ], - deploy_body: Some(body::repeated_dyn(r, vec![ + deploy_body: Some(body::repeated_dyn(T::MaxDelegateDependencies::get(), vec![ Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr Regular(Instruction::Call(1)), ])), @@ -943,6 +943,7 @@ benchmarks! { assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); assert_eq!(T::Currency::balance(&instance.account_id), Pallet::::min_balance() * 2u32.into()); assert_ne!(T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &instance.account_id), 0u32.into()); + assert_eq!(ContractInfoOf::::get(&instance.account_id).unwrap().delegate_dependencies_count() as u32, T::MaxDelegateDependencies::get()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 52c5150ca21..e99afd5d42e 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -108,6 +108,11 @@ impl ContractInfo { Ok(contract) } + /// Returns the number of locked delegate dependencies. + pub fn delegate_dependencies_count(&self) -> usize { + self.delegate_dependencies.len() + } + /// Associated child trie unique id is built from the hash part of the trie id. 
pub fn child_trie_info(&self) -> ChildInfo { ChildInfo::new_default(self.trie_id.as_ref()) -- GitLab From ea458d0b95d819d31683a8a09ca7973ae10b49be Mon Sep 17 00:00:00 2001 From: cuinix <65650185+cuinix@users.noreply.github.com> Date: Sat, 9 Mar 2024 05:28:04 +0800 Subject: [PATCH 69/86] fix some typos (#3587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: cuinix <915115094@qq.com> Co-authored-by: Bastian KΓΆcher --- bridges/primitives/test-utils/src/lib.rs | 2 +- cumulus/client/parachain-inherent/src/lib.rs | 4 ++-- .../parachains/integration-tests/emulated/common/src/impls.rs | 2 +- .../runtimes/coretime/coretime-rococo/src/xcm_config.rs | 2 +- .../runtimes/coretime/coretime-westend/src/xcm_config.rs | 2 +- .../runtimes/people/people-rococo/src/xcm_config.rs | 2 +- .../runtimes/people/people-westend/src/xcm_config.rs | 2 +- polkadot/cli/src/cli.rs | 2 +- polkadot/node/core/dispute-coordinator/src/initialized.rs | 2 +- polkadot/node/core/pvf-checker/src/lib.rs | 4 ++-- polkadot/node/subsystem-bench/README.md | 4 ++-- polkadot/node/subsystem-bench/src/lib/availability/mod.rs | 2 +- polkadot/node/subsystem-bench/src/lib/mock/av_store.rs | 2 +- polkadot/node/subsystem-bench/src/lib/network.rs | 4 ++-- polkadot/primitives/src/v6/slashing.rs | 2 +- polkadot/runtime/parachains/src/paras/mod.rs | 2 +- substrate/bin/node/cli/src/cli.rs | 2 +- templates/parachain/node/src/cli.rs | 2 +- templates/solochain/node/src/cli.rs | 2 +- 19 files changed, 23 insertions(+), 23 deletions(-) diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs index f23ddd1a10d..1d80890779b 100644 --- a/bridges/primitives/test-utils/src/lib.rs +++ b/bridges/primitives/test-utils/src/lib.rs @@ -129,7 +129,7 @@ pub fn make_justification_for_header( votes_ancestries.push(child.clone()); } - // The header we need to use when pre-commiting is the one at the highest height + // The header we need to use when pre-committing is the one at the highest height // on our chain. let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); unsigned_precommits.push(precommit_candidate); diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 57353638e19..051eb6764c8 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -159,7 +159,7 @@ impl ParachainInherentDataProvider { target: LOG_TARGET, relay_parent = ?relay_parent, error = ?e, - "An error occured during requesting the downward messages.", + "An error occurred during requesting the downward messages.", ); }) .ok()?; @@ -171,7 +171,7 @@ impl ParachainInherentDataProvider { target: LOG_TARGET, relay_parent = ?relay_parent, error = ?e, - "An error occured during requesting the inbound HRMP messages.", + "An error occurred during requesting the inbound HRMP messages.", ); }) .ok()?; diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 4bbb4701e43..c93484829fe 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -265,7 +265,7 @@ macro_rules! 
impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - // XCM is succesfully received and proccessed + // XCM is successfully received and proccessed [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 37bb8809dab..1722e1fcb31 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -192,7 +192,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 44049adf027..b03c7748fe0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -198,7 +198,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 311128a17ca..534d6519086 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -207,7 +207,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent and its pluralities (i.e. governance bodies) get free execution. diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 4b7da91c17e..6353a581352 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -214,7 +214,7 @@ pub type Barrier = TrailingSetTopicAsId< AllowKnownQueryResponses, WithComputedOrigin< ( - // If the message is one that immediately attemps to pay for execution, then + // If the message is one that immediately attempts to pay for execution, then // allow it. AllowTopLevelPaidExecutionFrom, // Parent, its pluralities (i.e. 
governance bodies), and the Fellows plurality diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 30f35ebcb6f..e1bc2309a94 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -52,7 +52,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone CLI /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Key management CLI utilities diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 54e0410268f..5f86da87f21 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -99,7 +99,7 @@ pub(crate) struct Initialized { /// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it /// was cached successfully or not. It is used to detect ancient disputes. highest_session_seen: SessionIndex, - /// Will be set to `true` if an error occured during the last caching attempt + /// Will be set to `true` if an error occurred during the last caching attempt gaps_in_cache: bool, spam_slots: SpamSlots, participation: Participation, diff --git a/polkadot/node/core/pvf-checker/src/lib.rs b/polkadot/node/core/pvf-checker/src/lib.rs index ae0fae6b4f9..c00ec0d952f 100644 --- a/polkadot/node/core/pvf-checker/src/lib.rs +++ b/polkadot/node/core/pvf-checker/src/lib.rs @@ -415,7 +415,7 @@ async fn check_signing_credentials( gum::warn!( target: LOG_TARGET, relay_parent = ?leaf, - "error occured during requesting validators: {:?}", + "error occurred during requesting validators: {:?}", e ); return None @@ -508,7 +508,7 @@ async fn sign_and_submit_pvf_check_statement( target: LOG_TARGET, ?relay_parent, ?validation_code_hash, - "error occured during submitting a vote: {:?}", + "error occurred during submitting a vote: {:?}", e, ); }, diff --git a/polkadot/node/subsystem-bench/README.md b/polkadot/node/subsystem-bench/README.md index 3aac2810ad5..94a449ed0bb 100644 --- a/polkadot/node/subsystem-bench/README.md +++ b/polkadot/node/subsystem-bench/README.md @@ -45,7 +45,7 @@ docker compose up Please follow the [official installation guide](https://prometheus.io/docs/prometheus/latest/installation/) for your platform/OS. -After succesfully installing and starting up Prometheus, we need to alter it's configuration such that it +After successfully installing and starting up Prometheus, we need to alter it's configuration such that it will scrape the benchmark prometheus endpoint `127.0.0.1:9999`. Please check the prometheus official documentation regarding the location of `prometheus.yml`. On MacOS for example the full path `/opt/homebrew/etc/prometheus.yml` @@ -211,7 +211,7 @@ You can select log target, subtarget and verbosity just like with Polkadot node ### View test metrics -Assuming the Grafana/Prometheus stack installation steps completed succesfully, you should be able to +Assuming the Grafana/Prometheus stack installation steps completed successfully, you should be able to view the test progress in real time by accessing [this link](http://localhost:3000/goto/SM5B8pNSR?orgId=1). 
Now run diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index f012a5a907e..dc4e1e40310 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -598,7 +598,7 @@ pub async fn benchmark_availability_write( gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ..."); for receiver in receivers.into_iter() { - let response = receiver.await.expect("Chunk is always served succesfully"); + let response = receiver.await.expect("Chunk is always served successfully"); // TODO: check if chunk is the one the peer expects to receive. assert!(response.result.is_ok()); } diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index 41c4fe2cbad..9064f17940f 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -97,7 +97,7 @@ impl HandleNetworkMessage for NetworkAvailabilityState { outgoing_request .pending_response .send(response) - .expect("Response is always sent succesfully"); + .expect("Response is always sent successfully"); None }, _ => Some(NetworkMessage::RequestFromNode(peer, request)), diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 1e09441792d..1bc35a014ff 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -219,7 +219,7 @@ impl Future for ProxiedRequest { Poll::Ready(response) => Poll::Ready(ProxiedResponse { sender: self.sender.take().expect("sender already used"), result: response - .expect("Response is always succesfully received.") + .expect("Response is always successfully received.") .result .map_err(|_| RequestFailure::Refused), }), @@ -761,7 +761,7 @@ pub fn new_network( gum::info!(target: LOG_TARGET, "{}",format!("connectivity {}%, latency {:?}", config.connectivity, config.latency).bright_black()); let metrics = - Metrics::new(&dependencies.registry).expect("Metrics always register succesfully"); + Metrics::new(&dependencies.registry).expect("Metrics always register successfully"); let mut validator_authority_id_mapping = HashMap::new(); // Create the channel from `peer` to `NetworkInterface` . diff --git a/polkadot/primitives/src/v6/slashing.rs b/polkadot/primitives/src/v6/slashing.rs index efffb932cdc..bcd7d0c2fc4 100644 --- a/polkadot/primitives/src/v6/slashing.rs +++ b/polkadot/primitives/src/v6/slashing.rs @@ -54,7 +54,7 @@ impl DisputesTimeSlot { /// is required to identify and verify it. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, Debug)] pub struct DisputeProof { - /// Time slot when the dispute occured. + /// Time slot when the dispute occurred. pub time_slot: DisputesTimeSlot, /// The dispute outcome. pub kind: SlashingOffenceKind, diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 39371391b1f..5acdb9683bb 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -1374,7 +1374,7 @@ impl Pallet { outgoing } - // note replacement of the code of para with given `id`, which occured in the + // note replacement of the code of para with given `id`, which occurred in the // context of the given relay-chain block number. provide the replaced code. 
// // `at` for para-triggered replacement is the block number of the relay-chain diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index f3c0435fd32..3345afae4fd 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -63,7 +63,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone CLI /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Key management cli utilities diff --git a/templates/parachain/node/src/cli.rs b/templates/parachain/node/src/cli.rs index 73ef996b750..a5c55b8118a 100644 --- a/templates/parachain/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -40,7 +40,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone /// [CLI](). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, } diff --git a/templates/solochain/node/src/cli.rs b/templates/solochain/node/src/cli.rs index 98037eb886a..7f1b67b3696 100644 --- a/templates/solochain/node/src/cli.rs +++ b/templates/solochain/node/src/cli.rs @@ -43,7 +43,7 @@ pub enum Subcommand { /// Try-runtime has migrated to a standalone CLI /// (). The subcommand exists as a stub and - /// deprecation notice. It will be removed entirely some time after Janurary 2024. + /// deprecation notice. It will be removed entirely some time after January 2024. TryRuntime, /// Db meta columns information. -- GitLab From 9f5d9fa96f88007292c2d59810b7c3c855da58a7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Sat, 9 Mar 2024 11:14:06 +0100 Subject: [PATCH 70/86] core: replace secp256k with k256 in crypto::ecdsa (#3525) This PR replaces the usage of [secp256k](https://crates.io/crates/secp256k1) crate with [k256](https://crates.io/crates/k256) in `core::crypto::ecdsa` for `non-std` environments as outcome of discussion in #3448. `secp256k1` is used in `std`, meaning that we should not affect host performance with this PR. `k256` is enabled in runtimes (`no-std`), and is required to proceed with #2044. If desirable, in future we can switch to `k256` also for `std`. That would require some performance evaluation (e.g. for EVM chains as per https://github.com/paritytech/polkadot-sdk/issues/3448#issuecomment-1976780391). 
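To illustrate the shape of the change, here is a simplified sketch of the std/no-std split
(condensed from the diff below; the real code lives in `sp_core::ecdsa` behind the `Pair` and
`Signature` types, and the free-standing `sign_prehashed` helper here is only for illustration):

```rust
// Assumes the same crate features `sp-core` enables: `secp256k1` with recovery and a
// global context on std, `k256` with the `ecdsa` feature for no-std runtimes.

/// 65-byte recoverable signature over a 32-byte prehash, host side (std).
#[cfg(feature = "std")]
fn sign_prehashed(secret: &secp256k1::SecretKey, msg: &[u8; 32]) -> [u8; 65] {
    use secp256k1::{Message, SECP256K1};
    let message = Message::from_digest_slice(msg).expect("message is 32 bytes; qed");
    let (rid, sig) = SECP256K1.sign_ecdsa_recoverable(&message, secret).serialize_compact();
    let mut out = [0u8; 65];
    out[..64].copy_from_slice(&sig);
    out[64] = rid.to_i32() as u8;
    out
}

/// The same operation for runtimes (no-std), now backed by `k256`.
#[cfg(not(feature = "std"))]
fn sign_prehashed(secret: &k256::ecdsa::SigningKey, msg: &[u8; 32]) -> [u8; 65] {
    let (sig, rid) = secret
        .sign_prehash_recoverable(msg)
        .expect("signing a 32-byte prehash does not fail; qed");
    let mut out = [0u8; 65];
    out[..64].copy_from_slice(&sig.to_bytes());
    out[64] = rid.to_byte();
    out
}
```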
Closes https://github.com/paritytech/polkadot-sdk/issues/3448 --------- Co-authored-by: command-bot <> Co-authored-by: Davide Galassi --- Cargo.lock | 27 ++++- substrate/primitives/core/Cargo.toml | 9 +- substrate/primitives/core/src/ecdsa.rs | 145 +++++++++++++++++-------- 3 files changed, 129 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e38f09db4d3..0b7ea71a536 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4815,6 +4815,7 @@ dependencies = [ "digest 0.10.7", "elliptic-curve", "rfc6979", + "serdect", "signature", "spki", ] @@ -4881,9 +4882,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", @@ -4894,6 +4895,7 @@ dependencies = [ "pkcs8", "rand_core 0.6.4", "sec1", + "serdect", "subtle 2.5.0", "zeroize", ] @@ -6971,14 +6973,15 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", "once_cell", + "serdect", "sha2 0.10.7", ] @@ -17101,6 +17104,7 @@ dependencies = [ "der", "generic-array 0.14.7", "pkcs8", + "serdect", "subtle 2.5.0", "zeroize", ] @@ -17359,6 +17363,16 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "serial_test" version = "2.0.0" @@ -18567,6 +18581,7 @@ dependencies = [ "hash256-std-hasher", "impl-serde", "itertools 0.10.5", + "k256", "lazy_static", "libsecp256k1", "log", @@ -22553,9 +22568,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 70ca7be6a2d..f7cc2b4fcf7 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -52,9 +52,12 @@ blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false } merlin = { version = "3.0", default-features = false } -secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false, optional = true } sp-runtime-interface = { path = "../runtime-interface", default-features = false } +# k256 crate, better portability, intended to be used in substrate-runtimes (no-std) +k256 = { version = "0.13.3", features = ["alloc", "ecdsa"], 
default-features = false } +# secp256k1 crate, better performance, intended to be used on host side (std) +secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true } @@ -76,6 +79,7 @@ bench = false [features] default = ["std"] + std = [ "array-bytes", "bandersnatch_vrfs?/std", @@ -94,6 +98,7 @@ std = [ "hash256-std-hasher/std", "impl-serde/std", "itertools", + "k256/std", "libsecp256k1/std", "log/std", "merlin/std", @@ -132,6 +137,7 @@ serde = [ "bs58/alloc", "dep:serde", "impl-serde", + "k256/serde", "primitive-types/serde_no_std", "scale-info/serde", "secrecy/alloc", @@ -147,7 +153,6 @@ full_crypto = [ "blake2", "ed25519-zebra", "libsecp256k1", - "secp256k1", "sp-crypto-hashing", "sp-runtime-interface/disable_target_static_assertions", ] diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index f172b3a7d02..e6bd2c7eb57 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -28,14 +28,15 @@ use crate::crypto::{ }; #[cfg(feature = "full_crypto")] use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; -#[cfg(all(feature = "full_crypto", not(feature = "std")))] -use secp256k1::Secp256k1; -#[cfg(feature = "std")] -use secp256k1::SECP256K1; -#[cfg(feature = "full_crypto")] + +#[cfg(all(not(feature = "std"), feature = "full_crypto"))] +use k256::ecdsa::SigningKey as SecretKey; +#[cfg(not(feature = "std"))] +use k256::ecdsa::VerifyingKey; +#[cfg(all(feature = "std", feature = "full_crypto"))] use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, - Message, PublicKey, SecretKey, + Message, PublicKey, SecretKey, SECP256K1, }; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; @@ -53,9 +54,9 @@ pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = 33; /// The byte length of signature pub const SIGNATURE_SERIALIZED_SIZE: usize = 65; -/// A secret seed (which is bytewise essentially equivalent to a SecretKey). +/// The secret seed. /// -/// We need it as a different type because `Seed` is expected to be AsRef<[u8]>. +/// The raw secret seed, which can be used to create the `Pair`. #[cfg(feature = "full_crypto")] type Seed = [u8; 32]; @@ -96,18 +97,21 @@ impl Public { /// Create a new instance from the given full public key. /// /// This will convert the full public key into the compressed format. - #[cfg(feature = "std")] pub fn from_full(full: &[u8]) -> Result { - let pubkey = if full.len() == 64 { + let mut tagged_full = [0u8; 65]; + let full = if full.len() == 64 { // Tag it as uncompressed public key. - let mut tagged_full = [0u8; 65]; tagged_full[0] = 0x04; tagged_full[1..].copy_from_slice(full); - secp256k1::PublicKey::from_slice(&tagged_full) + &tagged_full } else { - secp256k1::PublicKey::from_slice(full) + full }; - pubkey.map(|k| Self(k.serialize())).map_err(|_| ()) + #[cfg(feature = "std")] + let pubkey = PublicKey::from_slice(&full); + #[cfg(not(feature = "std"))] + let pubkey = VerifyingKey::from_sec1_bytes(&full); + pubkey.map(|k| k.into()).map_err(|_| ()) } } @@ -131,6 +135,24 @@ impl AsMut<[u8]> for Public { } } +#[cfg(feature = "std")] +impl From for Public { + fn from(pubkey: PublicKey) -> Self { + Self(pubkey.serialize()) + } +} + +#[cfg(not(feature = "std"))] +impl From for Public { + fn from(pubkey: VerifyingKey) -> Self { + Self::unchecked_from( + pubkey.to_sec1_bytes()[..] 
+ .try_into() + .expect("valid key is serializable to [u8,33]. qed."), + ) + } +} + impl TryFrom<&[u8]> for Public { type Error = (); @@ -331,23 +353,34 @@ impl Signature { /// Recover the public key from this signature and a pre-hashed message. #[cfg(feature = "full_crypto")] pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { - let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; - let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; - let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); - #[cfg(feature = "std")] - let context = SECP256K1; + { + let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; + let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); + SECP256K1.recover_ecdsa(&message, &sig).ok().map(Public::from) + } + #[cfg(not(feature = "std"))] - let context = Secp256k1::verification_only(); + { + let rid = k256::ecdsa::RecoveryId::from_byte(self.0[64])?; + let sig = k256::ecdsa::Signature::from_bytes((&self.0[..64]).into()).ok()?; + VerifyingKey::recover_from_prehash(message, &sig, rid).map(Public::from).ok() + } + } +} - context - .recover_ecdsa(&message, &sig) - .ok() - .map(|pubkey| Public(pubkey.serialize())) +#[cfg(not(feature = "std"))] +impl From<(k256::ecdsa::Signature, k256::ecdsa::RecoveryId)> for Signature { + fn from(recsig: (k256::ecdsa::Signature, k256::ecdsa::RecoveryId)) -> Signature { + let mut r = Self::default(); + r.0[..64].copy_from_slice(&recsig.0.to_bytes()); + r.0[64] = recsig.1.to_byte(); + r } } -#[cfg(feature = "full_crypto")] +#[cfg(all(feature = "std", feature = "full_crypto"))] impl From for Signature { fn from(recsig: RecoverableSignature) -> Signature { let mut r = Self::default(); @@ -384,17 +417,19 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = - SecretKey::from_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; - #[cfg(feature = "std")] - let context = SECP256K1; - #[cfg(not(feature = "std"))] - let context = Secp256k1::signing_only(); + { + let secret = SecretKey::from_slice(seed_slice) + .map_err(|_| SecretStringError::InvalidSeedLength)?; + Ok(Pair { public: PublicKey::from_secret_key(&SECP256K1, &secret).into(), secret }) + } - let public = PublicKey::from_secret_key(&context, &secret); - let public = Public(public.serialize()); - Ok(Pair { public, secret }) + #[cfg(not(feature = "std"))] + { + let secret = SecretKey::from_slice(seed_slice) + .map_err(|_| SecretStringError::InvalidSeedLength)?; + Ok(Pair { public: VerifyingKey::from(&secret).into(), secret }) + } } /// Derive a child key from a series of given junctions. @@ -438,7 +473,14 @@ impl TraitPair for Pair { impl Pair { /// Get the seed for this key. 
pub fn seed(&self) -> Seed { - self.secret.secret_bytes() + #[cfg(feature = "std")] + { + self.secret.secret_bytes() + } + #[cfg(not(feature = "std"))] + { + self.secret.to_bytes().into() + } } /// Exactly as `from_string` except that if no matches are found then, the the first 32 @@ -455,14 +497,19 @@ impl Pair { /// Sign a pre-hashed message pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { - let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); - #[cfg(feature = "std")] - let context = SECP256K1; - #[cfg(not(feature = "std"))] - let context = Secp256k1::signing_only(); + { + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); + SECP256K1.sign_ecdsa_recoverable(&message, &self.secret).into() + } - context.sign_ecdsa_recoverable(&message, &self.secret).into() + #[cfg(not(feature = "std"))] + { + self.secret + .sign_prehash_recoverable(message) + .expect("signing may not fail (???). qed.") + .into() + } } /// Verify a signature on a pre-hashed message. Return `true` if the signature is valid @@ -503,7 +550,7 @@ impl Pair { // NOTE: this solution is not effective when `Pair` is moved around memory. // The very same problem affects other cryptographic backends that are just using // `zeroize`for their secrets. -#[cfg(feature = "full_crypto")] +#[cfg(all(feature = "std", feature = "full_crypto"))] impl Drop for Pair { fn drop(&mut self) { self.secret.non_secure_erase() @@ -770,8 +817,18 @@ mod test { let msg = [0u8; 32]; let sig1 = pair.sign_prehashed(&msg); let sig2: Signature = { - let message = Message::from_digest_slice(&msg).unwrap(); - SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() + #[cfg(feature = "std")] + { + let message = Message::from_digest_slice(&msg).unwrap(); + SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() + } + #[cfg(not(feature = "std"))] + { + pair.secret + .sign_prehash_recoverable(&msg) + .expect("signing may not fail (???). qed.") + .into() + } }; assert_eq!(sig1, sig2); -- GitLab From 1c435e91c117b877c803427a91c0ccd18c527382 Mon Sep 17 00:00:00 2001 From: Viki Val Date: Sat, 9 Mar 2024 07:32:53 -0800 Subject: [PATCH 71/86] :bug: Depositing PalletAttributeSet on incorrect nft (#2740) ## Context Implementing `HolderOf(collection_id)` we have observed a fancy glitch where pallet deposits event with incorrect values ### Test case [Observe following extrinsic](https://assethub-polkadot.subscan.io/extrinsic/0xdc72321b7674aa209c2f194ed49bd6bd12708af103f98b5b9196e0132dcba777) To mint in collection `51` user needs to be `HolderOf(50)`. Therefore current user is owner of item `394` `witness_data { owned_item: 394 }` All checking is done correctly, storage is updated correctly ![photo_2023-12-18 16 07 11](https://github.com/paritytech/polkadot-sdk/assets/22471030/ca991272-156d-4db1-97b2-1a2873fc5d3f) However the event which is emitted does not make semantic sense as we updated storage for `50-394` not for `51-114` ![photo_2023-12-18 16 07 17](https://github.com/paritytech/polkadot-sdk/assets/22471030/c998a92c-e306-4433-aad8-103078140e23) ## The fix This PR fixes that depositing `PalletAttributeSet` emits correct values. 
--------- Co-authored-by: Jegor Sidorenko Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> --- substrate/frame/nfts/src/lib.rs | 4 ++-- substrate/frame/nfts/src/tests.rs | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 7cf7cdc61a7..615720268fe 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -874,8 +874,8 @@ pub mod pallet { ), ); Self::deposit_event(Event::PalletAttributeSet { - collection, - item: Some(item), + collection: collection_id, + item: Some(owned_item), attribute: pallet_attribute, value: attribute_value, }); diff --git a/substrate/frame/nfts/src/tests.rs b/substrate/frame/nfts/src/tests.rs index 9e521537534..6bf9427f4e6 100644 --- a/substrate/frame/nfts/src/tests.rs +++ b/substrate/frame/nfts/src/tests.rs @@ -440,6 +440,12 @@ fn mint_should_work() { account(2), Some(MintWitness { owned_item: Some(43), ..Default::default() }) )); + assert!(events().contains(&Event::::PalletAttributeSet { + collection: 0, + item: Some(43), + attribute: PalletAttributes::<::CollectionId>::UsedToClaim(1), + value: Nfts::construct_attribute_value(vec![]).unwrap(), + })); // can't mint twice assert_noop!( -- GitLab From bbaa5a3bb5451fc0c51fd42fd91d765f5589c033 Mon Sep 17 00:00:00 2001 From: Muharem Date: Sun, 10 Mar 2024 22:24:41 +0100 Subject: [PATCH 72/86] Asset Conversion: doc guide to address incorrect exchange rate for non-withdrawable pool (#3275) This documentation guide to address an incorrect exchange rate for a non-withdrawable pool. --- substrate/frame/asset-conversion/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index f13d40d3e7e..c9725f9d39d 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -413,6 +413,11 @@ pub mod pallet { /// Params `amount1_min`/`amount2_min` represent that. /// `mint_to` will be sent the liquidity tokens that represent this share of the pool. /// + /// NOTE: when encountering an incorrect exchange rate and non-withdrawable pool liquidity, + /// batch an atomic call with [`Pallet::add_liquidity`] and + /// [`Pallet::swap_exact_tokens_for_tokens`] or [`Pallet::swap_tokens_for_exact_tokens`] + /// calls to render the liquidity withdrawable and rectify the exchange rate. + /// /// Once liquidity is added, someone may successfully call /// [`Pallet::swap_exact_tokens_for_tokens`] successfully. #[pallet::call_index(1)] -- GitLab From a6713c55fd5082d333518c3ca13f2a4294726fcc Mon Sep 17 00:00:00 2001 From: Egor_P Date: Mon, 11 Mar 2024 16:37:22 +0700 Subject: [PATCH 73/86] Fix release docker image GHA (#3547) This PR add extra checks for the input fields in the GHA which does the docker image build and publishing. --- .github/scripts/common/lib.sh | 96 ++++++++++++++++++- .../workflows/release-50_publish-docker.yml | 16 +++- .../workflows/release-99_notif-published.yml | 10 +- 3 files changed, 107 insertions(+), 15 deletions(-) diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index bd12d9c6e6f..29dc269ffd2 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,6 +237,61 @@ fetch_release_artifacts() { popd > /dev/null } +# Fetch the release artifacts like binary and sigantures from S3. 
Assumes the ENV are set: +# - RELEASE_ID +# - GITHUB_TOKEN +# - REPO in the form paritytech/polkadot +fetch_release_artifacts_from_s3() { + echo "Version : $VERSION" + echo "Repo : $REPO" + echo "Binary : $BINARY" + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} + echo "OUTPUT_DIR : $OUTPUT_DIR" + + URL_BASE=$(get_s3_url_base $BINARY) + echo "URL_BASE=$URL_BASE" + + URL_BINARY=$URL_BASE/$VERSION/$BINARY + URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$BINARY.asc + + # Fetch artifacts + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + + echo "Fetching artifacts..." + for URL in $URL_BINARY $URL_SHA $URL_ASC; do + echo "Fetching %s" "$URL" + curl --progress-bar -LO "$URL" || echo "Missing $URL" + done + + pwd + ls -al --color + popd > /dev/null + +} + +# Pass the name of the binary as input, it will +# return the s3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) + printf "https://releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "https://releases.parity.io/cumulus" + ;; + + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac +} + + # Check the checksum for a given binary function check_sha256() { echo "Checking SHA256 for $1" @@ -248,13 +303,11 @@ function check_sha256() { function import_gpg_keys() { GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" - WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" - MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" echo "Importing GPG keys from $GPG_KEYSERVER in parallel" - for key in $SEC $WILL $EGOR $MARA $MORGAN; do + for key in $SEC $EGOR $MORGAN; do ( echo "Importing GPG key $key" gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key @@ -344,3 +397,40 @@ function find_runtimes() { done echo $JSON } + +# Filter the version matches the particular pattern and return it. 
+# input: version (v1.8.0 or v1.8.0-rc1) +# output: none +filter_version_from_input() { + version=$1 + regex="(^v[0-9]+\.[0-9]+\.[0-9]+)$|(^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+)$" + + if [[ $version =~ $regex ]]; then + if [ -n "${BASH_REMATCH[1]}" ]; then + echo "${BASH_REMATCH[1]}" + elif [ -n "${BASH_REMATCH[2]}" ]; then + echo "${BASH_REMATCH[2]}" + fi + else + echo "Invalid version: $version" + exit 1 + fi + +} + +# Check if the release_id is valid number +# input: release_id +# output: release_id or exit 1 +check_release_id() { + input=$1 + + release_id=$(echo "$input" | sed 's/[^0-9]//g') + + if [[ $release_id =~ ^[0-9]+$ ]]; then + echo "$release_id" + else + echo "Invalid release_id from input: $input" + exit 1 + fi + +} diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index ecbac01cd3a..67e93ee9657 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -36,7 +36,7 @@ on: -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ jq '.[] | { name: .name, id: .id }' required: true - type: string + type: number registry: description: Container registry @@ -61,7 +61,6 @@ permissions: contents: write env: - RELEASE_ID: ${{ inputs.release_id }} ENGINE: docker REGISTRY: ${{ inputs.registry }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -71,6 +70,7 @@ env: # EVENT_ACTION: ${{ github.event.action }} EVENT_NAME: ${{ github.event_name }} IMAGE_TYPE: ${{ inputs.image_type }} + VERSION: ${{ inputs.version }} jobs: fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build @@ -95,13 +95,16 @@ jobs: # chmod a+x $BINARY # ls -al - - name: Fetch rc artifacts or release artifacts based on release id + - name: Fetch rc artifacts or release artifacts from s3 based on version #this step runs only if the workflow is triggered manually if: ${{ env.EVENT_NAME == 'workflow_dispatch' }} run: | . ./.github/scripts/common/lib.sh - fetch_release_artifacts + VERSION=$(filter_version_from_input "${{ inputs.version }}") + echo "VERSION=${VERSION}" >> $GITHUB_ENV + + fetch_release_artifacts_from_s3 - name: Cache the artifacts uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 @@ -147,7 +150,10 @@ jobs: if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs run: | - release=release-${{ inputs.release_id }} && \ + . ./.github/scripts/common/lib.sh + + RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + release=release-$RELEASE_ID && \ echo "release=${release}" >> $GITHUB_OUTPUT commit=$(git rev-parse --short HEAD) && \ diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml index 732db15d9c0..05c9d6a47f5 100644 --- a/.github/workflows/release-99_notif-published.yml +++ b/.github/workflows/release-99_notif-published.yml @@ -16,12 +16,6 @@ jobs: - name: "RelEng: Polkadot Release Coordination" room: '!cqAmzdIcbOFwrdrubV:parity.io' pre-release: true - - name: 'General: Rust, Polkadot, Substrate' - room: '!aJymqQYtCjjqImFLSb:parity.io' - pre-release: false - - name: 'Team: DevOps' - room: '!lUslSijLMgNcEKcAiE:parity.io' - pre-release: true # External - name: 'Ledger <> Polkadot Coordination' @@ -48,7 +42,9 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - A (pre)release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
+ @room
+
+ A new node release has been ${{github.event.action}} in **${{github.event.repository.full_name}}:**
Release version: [${{github.event.release.tag_name}}](${{github.event.release.html_url}}) ----- -- GitLab From 8dc6048d5eb20886f5d0781d54e073441d3bd86c Mon Sep 17 00:00:00 2001 From: eskimor Date: Mon, 11 Mar 2024 15:11:55 +0100 Subject: [PATCH 74/86] Remove unused FullCandidateReceipt (#3641) Currently redesigning candidate data structures, noticed that this one seems dead. Co-authored-by: eskimor --- polkadot/primitives/src/v6/mod.rs | 12 --------- .../implementers-guide/src/types/README.md | 11 -------- .../implementers-guide/src/types/candidate.md | 26 ++----------------- 3 files changed, 2 insertions(+), 47 deletions(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 89431f7801f..cab34deeb50 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -533,18 +533,6 @@ impl CandidateReceipt { } } -/// All data pertaining to the execution of a para candidate. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -pub struct FullCandidateReceipt { - /// The inner candidate receipt. - pub inner: CandidateReceipt, - /// The validation data derived from the relay-chain state at that - /// point. The hash of the persisted validation data should - /// match the `persisted_validation_data_hash` in the descriptor - /// of the receipt. - pub validation_data: PersistedValidationData, -} - /// A candidate-receipt with commitments directly included. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Hash))] diff --git a/polkadot/roadmap/implementers-guide/src/types/README.md b/polkadot/roadmap/implementers-guide/src/types/README.md index 87092bf3a00..5fd3050f949 100644 --- a/polkadot/roadmap/implementers-guide/src/types/README.md +++ b/polkadot/roadmap/implementers-guide/src/types/README.md @@ -55,17 +55,6 @@ digraph { CandidateCommitmentsHash [label = "Hash", shape="doublecircle", fill="gray90"] CandidateCommitmentsHash -> CandidateCommitments:name - FullCandidateReceipt [label = < - - - - -
FullCandidateReceipt<H = Hash, N = BlockNumber>
innerCandidateReceipt<H>
validation_dataValidationData<N>
- >] - - FullCandidateReceipt:inner -> CandidateReceipt:name - FullCandidateReceipt:validation_data -> ValidationData:name - CommittedCandidateReceipt [label = < diff --git a/polkadot/roadmap/implementers-guide/src/types/candidate.md b/polkadot/roadmap/implementers-guide/src/types/candidate.md index 00176229e5a..399d7854ac4 100644 --- a/polkadot/roadmap/implementers-guide/src/types/candidate.md +++ b/polkadot/roadmap/implementers-guide/src/types/candidate.md @@ -20,12 +20,8 @@ struct ParaId(u32); ## Candidate Receipt -Much info in a [`FullCandidateReceipt`](#full-candidate-receipt) is duplicated from the relay-chain state. When the -corresponding relay-chain state is considered widely available, the Candidate Receipt should be favored over the -`FullCandidateReceipt`. - -Examples of situations where the state is readily available includes within the scope of work done by subsystems working -on a given relay-parent, or within the logic of the runtime importing a backed candidate. +Compact representation of the result of a validation. This is what validators +receive from collators, together with the PoV. ```rust /// A candidate-receipt. @@ -37,24 +33,6 @@ struct CandidateReceipt { } ``` -## Full Candidate Receipt - -This is the full receipt type. The `PersistedValidationData` are technically redundant with the `inner.relay_parent`, -which uniquely describes the block in the blockchain from whose state these values are derived. The -[`CandidateReceipt`](#candidate-receipt) variant is often used instead for this reason. - -However, the Full Candidate Receipt type is useful as a means of avoiding the implicit dependency on availability of old -blockchain state. In situations such as availability and approval, having the full description of the candidate within a -self-contained struct is convenient. - -```rust -/// All data pertaining to the execution of a para candidate. -struct FullCandidateReceipt { - inner: CandidateReceipt, - validation_data: PeristedValidationData, -} -``` - ## Committed Candidate Receipt This is a variant of the candidate receipt which includes the commitments of the candidate receipt alongside the -- GitLab From aa35328371d967e6af20c03e86db0b6e5c11cdf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Mon, 11 Mar 2024 21:12:27 +0700 Subject: [PATCH 75/86] [pallet_broker] Fix `adapt_price` behaviour at zero (#3636) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes the behaviour of `Linear` which is the default implementation of the `AdaptPrice` trait in the broker pallet. Previously if cores were offered but not sold in only one sale, the price would be set to zero and due to the logic being purely multiplicative, the price would stay at 0 indefinitely. This could be further paired with a configurable minimum in the broker pallet itself, which will be a future PR. This affects the Rococo and Westend Coretime chains, but Kusama has a different implementation so this isn't required for the Kusama launch. I actually thought I opened this a while ago. 
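For intuition, here is the adjusted curve in plain numbers: a minimal sketch that uses `f64` instead of the pallet's `FixedU64`, and assumes `target > 0` in the undersold branch plus `sold <= limit`, as the pallet guarantees.

```rust
/// Simplified model of the new `Linear::adapt_price` (illustrative only).
fn adapt_price(sold: u32, target: u32, limit: u32) -> f64 {
    if sold <= target {
        // Undersold: the factor stays within [0.5, 1.0] instead of collapsing to 0.
        0.5 + sold as f64 / (2.0 * target as f64)
    } else {
        // Oversold: the factor grows linearly, reaching 2.0 at a full sell-out.
        1.0 + (sold - target) as f64 / (limit - target) as f64
    }
}

fn main() {
    assert_eq!(adapt_price(0, 5, 10), 0.5); // nothing sold: halve the price, don't zero it
    assert_eq!(adapt_price(5, 5, 10), 1.0); // exactly on target: price unchanged
    assert_eq!(adapt_price(10, 5, 10), 2.0); // everything sold: price doubles
}
```

The important part is the lower bound: previously a sale with zero purchases returned a factor of 0, and because the price update is purely multiplicative it could never recover from that.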
--------- Co-authored-by: Bastian KΓΆcher --- prdoc/pr_3636.prdoc | 15 +++++++++ substrate/frame/broker/src/adapt_price.rs | 38 ++++++++++++++++++++--- 2 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_3636.prdoc diff --git a/prdoc/pr_3636.prdoc b/prdoc/pr_3636.prdoc new file mode 100644 index 00000000000..934158ac102 --- /dev/null +++ b/prdoc/pr_3636.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet_broker] Fix `Linear::adapt_price` behavior at zero" + +doc: + - audience: Runtime Dev + description: | + This fixes the behaviour of `Linear` which is the default implementation of the `AdaptPrice` + trait in the broker pallet. Previously if cores were offered but not sold in only one sale, + the price would be set to zero and due to the logic being purely multiplicative, the price + would stay at 0 indefinitely. + +crates: + - name: pallet-broker diff --git a/substrate/frame/broker/src/adapt_price.rs b/substrate/frame/broker/src/adapt_price.rs index 8266625687a..fbcd7afdf0d 100644 --- a/substrate/frame/broker/src/adapt_price.rs +++ b/substrate/frame/broker/src/adapt_price.rs @@ -19,6 +19,7 @@ use crate::CoreIndex; use sp_arithmetic::{traits::One, FixedU64}; +use sp_runtime::Saturating; /// Type for determining how to set price. pub trait AdaptPrice { @@ -49,14 +50,24 @@ impl AdaptPrice for () { pub struct Linear; impl AdaptPrice for Linear { fn leadin_factor_at(when: FixedU64) -> FixedU64 { - FixedU64::from(2) - when + FixedU64::from(2).saturating_sub(when) } fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64 { if sold <= target { - FixedU64::from_rational(sold.into(), target.into()) + // Range of [0.5, 1.0]. + FixedU64::from_rational(1, 2).saturating_add(FixedU64::from_rational( + sold.into(), + target.saturating_mul(2).into(), + )) } else { - FixedU64::one() + - FixedU64::from_rational((sold - target).into(), (limit - target).into()) + // Range of (1.0, 2]. + + // Unchecked math: In this branch we know that sold > target. The limit must be >= sold + // by construction, and thus target must be < limit. + FixedU64::one().saturating_add(FixedU64::from_rational( + (sold - target).into(), + (limit - target).into(), + )) } } } @@ -81,4 +92,23 @@ mod tests { } } } + + #[test] + fn linear_bound_check() { + // Using constraints from pallet implementation i.e. `limit >= sold`. + // Check extremes + let limit = 10; + let target = 5; + + // Maximally sold: `sold == limit` + assert_eq!(Linear::adapt_price(limit, target, limit), FixedU64::from_float(2.0)); + // Ideally sold: `sold == target` + assert_eq!(Linear::adapt_price(target, target, limit), FixedU64::one()); + // Minimally sold: `sold == 0` + assert_eq!(Linear::adapt_price(0, target, limit), FixedU64::from_float(0.5)); + // Optimistic target: `target == limit` + assert_eq!(Linear::adapt_price(limit, limit, limit), FixedU64::one()); + // Pessimistic target: `target == 0` + assert_eq!(Linear::adapt_price(limit, 0, limit), FixedU64::from_float(2.0)); + } } -- GitLab From 4249a3d6cbca1da2ee15237c05691005973502e9 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Mon, 11 Mar 2024 15:38:44 +0100 Subject: [PATCH 76/86] Remove getters from `im-online` pallet (#3589) As I've been dancing around this pallet for quite some time anyway, I decided to remove getters at once. 
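For downstream code the change is mechanical. A rough before/after sketch (not taken from this patch; it assumes the usual `Runtime` type and `ImOnline` alias of a mock or runtime crate):

```rust
// Before: the getters generated by `#[pallet::getter(..)]`.
let keys = ImOnline::keys();
let seen = ImOnline::received_heartbeats(&session_index, &auth_index);

// After: read the (now public) storage types directly.
let keys = pallet_im_online::Keys::<Runtime>::get();
let seen = pallet_im_online::ReceivedHeartbeats::<Runtime>::get(session_index, auth_index);
```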
There were only a few leftovers in tests. Related: #3326 CC @muraca --- prdoc/pr_3589.prdoc | 10 ++++++++++ substrate/frame/im-online/src/lib.rs | 12 ++++-------- substrate/frame/im-online/src/tests.rs | 9 +++------ 3 files changed, 17 insertions(+), 14 deletions(-) create mode 100644 prdoc/pr_3589.prdoc diff --git a/prdoc/pr_3589.prdoc b/prdoc/pr_3589.prdoc new file mode 100644 index 00000000000..98b9b1039ab --- /dev/null +++ b/prdoc/pr_3589.prdoc @@ -0,0 +1,10 @@ +title: Removed `pallet::getter` usage from `pallet-im-online` + +doc: + - audience: Runtime Dev + description: | + This PR removes `pallet::getter` usage from `pallet-im-online`, and updates dependant code accordingly. + The syntax `StorageItem::::get()` should be used instead. + +crates: + - name: pallet-im-online diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index f14093aa09a..239b47834d1 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -338,26 +338,22 @@ pub mod pallet { /// progress estimate from `NextSessionRotation`, as those estimates should be /// more accurate then the value we calculate for `HeartbeatAfter`. #[pallet::storage] - #[pallet::getter(fn heartbeat_after)] - pub(super) type HeartbeatAfter = StorageValue<_, BlockNumberFor, ValueQuery>; + pub type HeartbeatAfter = StorageValue<_, BlockNumberFor, ValueQuery>; /// The current set of keys that may issue a heartbeat. #[pallet::storage] - #[pallet::getter(fn keys)] - pub(super) type Keys = + pub type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; /// For each session index, we keep a mapping of `SessionIndex` and `AuthIndex`. #[pallet::storage] - #[pallet::getter(fn received_heartbeats)] - pub(super) type ReceivedHeartbeats = + pub type ReceivedHeartbeats = StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, bool>; /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. #[pallet::storage] - #[pallet::getter(fn authored_blocks)] - pub(super) type AuthoredBlocks = StorageDoubleMap< + pub type AuthoredBlocks = StorageDoubleMap< _, Twox64Concat, SessionIndex, diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 7efca926eb0..6f5a14d7e7f 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -26,10 +26,7 @@ use sp_core::offchain::{ testing::{TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }; -use sp_runtime::{ - testing::UintAuthorityId, - transaction_validity::{InvalidTransaction, TransactionValidityError}, -}; +use sp_runtime::testing::UintAuthorityId; #[test] fn test_unresponsiveness_slash_fraction() { @@ -267,14 +264,14 @@ fn should_cleanup_received_heartbeats_on_session_end() { let _ = heartbeat(1, 2, 0, 1.into(), Session::validators()).unwrap(); // the heartbeat is stored - assert!(!ImOnline::received_heartbeats(&2, &0).is_none()); + assert!(!super::pallet::ReceivedHeartbeats::::get(2, 0).is_none()); advance_session(); // after the session has ended we have already processed the heartbeat // message, so any messages received on the previous session should have // been pruned. 
- assert!(ImOnline::received_heartbeats(&2, &0).is_none()); + assert!(super::pallet::ReceivedHeartbeats::::get(2, 0).is_none()); }); } -- GitLab From 02f1f2c47609483be04097e27fc52c06883a6540 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Mon, 11 Mar 2024 18:00:52 +0200 Subject: [PATCH 77/86] Small fixes in para-scheduler pallet (#3524) Fixes some typos, outdated comments and test asserts. Also uses safe math and `defensive` for arithmetic operations. --- polkadot/runtime/parachains/src/scheduler.rs | 23 ++++++++++++------- .../runtime/parachains/src/scheduler/tests.rs | 11 +++++---- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 61ab36dbe96..f231864a85e 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -37,7 +37,7 @@ //! availability cores over time. use crate::{configuration, initializer::SessionChangeNotification, paras}; -use frame_support::pallet_prelude::*; +use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; pub use polkadot_core_primitives::v2::BlockNumber; use primitives::{ @@ -59,6 +59,7 @@ pub use pallet::*; mod tests; const LOG_TARGET: &str = "runtime::parachains::scheduler"; + pub mod migration; #[frame_support::pallet] @@ -88,10 +89,8 @@ pub mod pallet { #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; - /// One entry for each availability core. Entries are `None` if the core is not currently - /// occupied. Can be temporarily `Some` if scheduled but not occupied. - /// The i'th parachain belongs to the i'th core, with the remaining cores all being - /// parathread-multiplexers. + /// One entry for each availability core. The i'th parachain belongs to the i'th core, with the + /// remaining cores all being on demand parachain multiplexers. /// /// Bounded by the maximum of either of these two values: /// * The number of parachains and parathread multiplexers @@ -146,7 +145,7 @@ pub mod pallet { /// One entry for each availability core. The `VecDeque` represents the assignments to be /// scheduled on that core. The value contained here will not be valid after the end of - /// a block. Runtime APIs should be used to determine scheduled cores/ for the upcoming block. + /// a block. Runtime APIs should be used to determine scheduled cores for the upcoming block. #[pallet::storage] #[pallet::getter(fn claimqueue)] pub(crate) type ClaimQueue = @@ -236,8 +235,16 @@ impl Pallet { if n_cores == 0 || validators.is_empty() { ValidatorGroups::::set(Vec::new()); } else { - let group_base_size = validators.len() / n_cores as usize; - let n_larger_groups = validators.len() % n_cores as usize; + let group_base_size = validators + .len() + .checked_div(n_cores as usize) + .defensive_proof("n_cores should not be 0") + .unwrap_or(0); + let n_larger_groups = validators + .len() + .checked_rem(n_cores as usize) + .defensive_proof("n_cores should not be 0") + .unwrap_or(0); // Groups contain indices into the validators from the session change notification, // which are already shuffled. 
diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 28b3a6b4f5d..e6a1e66b3a5 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -213,6 +213,7 @@ fn claimqueue_ttl_drop_fn_works() { // Drop expired claim. Scheduler::drop_expired_claims_from_claimqueue(); + assert!(!claimqueue_contains_para_ids::(vec![para_id])); // Add a claim on core 0 with a ttl == now (17) let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); @@ -222,7 +223,7 @@ fn claimqueue_ttl_drop_fn_works() { Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); let cq = Scheduler::claimqueue(); - assert!(cq.get(&core_idx).unwrap().len() == 3); + assert_eq!(cq.get(&core_idx).unwrap().len(), 3); // Add a claim to the test assignment provider. let assignment = Assignment::Bulk(para_id); @@ -234,8 +235,9 @@ fn claimqueue_ttl_drop_fn_works() { let cq = Scheduler::claimqueue(); let cqc = cq.get(&core_idx).unwrap(); - // Same number of claims - assert!(cqc.len() == 3); + // Same number of claims, because a new claim is popped from `MockAssigner` instead of the + // expired one + assert_eq!(cqc.len(), 3); // The first 2 claims in the queue should have a ttl of 17, // being the ones set up prior in this test as claims 1 and 3. @@ -355,14 +357,13 @@ fn fill_claimqueue_fills() { schedule_blank_para(para_b); schedule_blank_para(para_c); - // start a new session to activate, 3 validators for 3 cores. + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), ], ..Default::default() }), -- GitLab From 05381afcb2e2015fe7f379ddba7522d595a5e8cc Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 11 Mar 2024 17:17:45 +0100 Subject: [PATCH 78/86] subsystem-bench: adjust test config to Kusama (#3583) Fixes https://github.com/paritytech/polkadot-sdk/issues/3528 ```rust latency: mean_latency_ms = 30 // common sense std_dev = 2.0 // common sense n_validators = 300 // max number of validators, from chain config n_cores = 60 // 300/5 max_validators_per_core = 5 // default min_pov_size = 5120 // max max_pov_size = 5120 // max peer_bandwidth = 44040192 // from the Parity's kusama validators bandwidth = 44040192 // from the Parity's kusama validators connectivity = 90 // we need to be connected to 90-95% of peers ``` --- ...ilability-distribution-regression-bench.rs | 19 +++------- .../availability-recovery-regression-bench.rs | 13 +------ .../subsystem-bench/src/lib/configuration.rs | 38 ++++++++++++++----- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs index f2872f3c72b..7a490d5e47f 100644 --- a/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs @@ -16,7 +16,7 @@ //! availability-read regression tests //! -//! 
TODO: Explain the test case after configuration adjusted to Kusama +//! Availability read benchmark based on Kusama parameters and scale. //! //! Subsystems involved: //! - availability-distribution @@ -25,28 +25,19 @@ use polkadot_subsystem_bench::{ availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState}, - configuration::{PeerLatency, TestConfiguration}, + configuration::TestConfiguration, usage::BenchmarkUsage, }; const BENCH_COUNT: usize = 3; -const WARM_UP_COUNT: usize = 20; +const WARM_UP_COUNT: usize = 30; const WARM_UP_PRECISION: f64 = 0.01; fn main() -> Result<(), String> { let mut messages = vec![]; - - // TODO: Adjust the test configurations to Kusama values let mut config = TestConfiguration::default(); - config.latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }); - config.n_validators = 1000; - config.n_cores = 200; - config.max_validators_per_core = 5; - config.min_pov_size = 5120; - config.max_pov_size = 5120; - config.peer_bandwidth = 52428800; - config.bandwidth = 52428800; - config.connectivity = 75; + // A single node effort roughly n_cores * needed_approvals / n_validators = 60 * 30 / 300 + config.n_cores = 6; config.num_blocks = 3; config.generate_pov_sizes(); diff --git a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs index beb063e7ae0..30cc4d47ecc 100644 --- a/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/tests/availability-recovery-regression-bench.rs @@ -16,7 +16,7 @@ //! availability-write regression tests //! -//! TODO: Explain the test case after configuration adjusted to Kusama +//! Availability write benchmark based on Kusama parameters and scale. //! //! Subsystems involved: //! - availability-recovery @@ -26,7 +26,7 @@ use polkadot_subsystem_bench::{ benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, TestDataAvailability, TestState, }, - configuration::{PeerLatency, TestConfiguration}, + configuration::TestConfiguration, usage::BenchmarkUsage, }; @@ -37,18 +37,9 @@ const WARM_UP_PRECISION: f64 = 0.01; fn main() -> Result<(), String> { let mut messages = vec![]; - // TODO: Adjust the test configurations to Kusama values let options = DataAvailabilityReadOptions { fetch_from_backers: true }; let mut config = TestConfiguration::default(); - config.latency = Some(PeerLatency { mean_latency_ms: 100, std_dev: 1.0 }); - config.n_validators = 300; - config.n_cores = 20; - config.min_pov_size = 5120; - config.max_pov_size = 5120; - config.peer_bandwidth = 52428800; - config.bandwidth = 52428800; config.num_blocks = 3; - config.connectivity = 90; config.generate_pov_sizes(); warm_up(config.clone(), options.clone())?; diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index c7693308527..17c81c4fd35 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -35,19 +35,34 @@ pub struct PeerLatency { pub std_dev: f64, } +// Based on Kusama `max_validators` +fn default_n_validators() -> usize { + 300 +} + +// Based on Kusama cores +fn default_n_cores() -> usize { + 60 +} + // Default PoV size in KiB. 
fn default_pov_size() -> usize { - 5120 + 5 * 1024 } -// Default bandwidth in bytes +// Default bandwidth in bytes, based stats from Kusama validators fn default_bandwidth() -> usize { - 52428800 + 42 * 1024 * 1024 +} + +// Default peer latency +fn default_peer_latency() -> Option { + Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 }) } // Default connectivity percentage fn default_connectivity() -> usize { - 100 + 90 } // Default backing group size @@ -63,6 +78,7 @@ fn default_needed_approvals() -> usize { fn default_zeroth_delay_tranche_width() -> usize { 0 } + fn default_relay_vrf_modulo_samples() -> usize { 6 } @@ -78,8 +94,10 @@ fn default_no_show_slots() -> usize { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TestConfiguration { /// Number of validators + #[serde(default = "default_n_validators")] pub n_validators: usize, /// Number of cores + #[serde(default = "default_n_cores")] pub n_cores: usize, /// The number of needed votes to approve a candidate. #[serde(default = "default_needed_approvals")] @@ -111,10 +129,10 @@ pub struct TestConfiguration { #[serde(default = "default_bandwidth")] pub bandwidth: usize, /// Optional peer emulation latency (round trip time) wrt node under test - #[serde(default)] + #[serde(default = "default_peer_latency")] pub latency: Option, - /// Connectivity ratio, the percentage of peers we are not connected to, but ar part of - /// the topology. + /// Connectivity ratio, the percentage of peers we are connected to, but as part of the + /// topology. #[serde(default = "default_connectivity")] pub connectivity: usize, /// Number of blocks to run the test for @@ -124,8 +142,8 @@ pub struct TestConfiguration { impl Default for TestConfiguration { fn default() -> Self { Self { - n_validators: Default::default(), - n_cores: Default::default(), + n_validators: default_n_validators(), + n_cores: default_n_cores(), needed_approvals: default_needed_approvals(), zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(), relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(), @@ -137,7 +155,7 @@ impl Default for TestConfiguration { pov_sizes: Default::default(), peer_bandwidth: default_bandwidth(), bandwidth: default_bandwidth(), - latency: Default::default(), + latency: default_peer_latency(), connectivity: default_connectivity(), num_blocks: Default::default(), } -- GitLab From d3f81056ad1054731e711f2761673ef72686697f Mon Sep 17 00:00:00 2001 From: philoniare Date: Tue, 12 Mar 2024 04:39:21 +0800 Subject: [PATCH 79/86] [Deprecation] Remove the deprecated Store trait (#3532) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description *Removes the deprecated `trait Store` feature from the code base* Fixes #222 --------- Co-authored-by: DΓ³nal Murray --- prdoc/pr_3532.prdoc | 15 +++++ substrate/frame/support/procedural/src/lib.rs | 10 --- .../procedural/src/pallet/expand/mod.rs | 3 - .../src/pallet/expand/store_trait.rs | 67 ------------------- .../procedural/src/pallet/parse/mod.rs | 2 - .../src/pallet/parse/pallet_struct.rs | 26 +------ substrate/frame/support/src/lib.rs | 24 +------ .../tests/pallet_ui/deprecated_store_attr.rs | 28 -------- .../pallet_ui/deprecated_store_attr.stderr | 10 --- .../tests/pallet_ui/duplicate_store_attr.rs | 42 ------------ .../pallet_ui/duplicate_store_attr.stderr | 5 -- .../pallet_struct_invalid_attr.stderr | 2 +- .../pallet_ui/store_trait_leak_private.rs | 42 ------------ .../pallet_ui/store_trait_leak_private.stderr | 19 ------ 
.../support/test/tests/storage_transaction.rs | 1 - 15 files changed, 20 insertions(+), 276 deletions(-) create mode 100644 prdoc/pr_3532.prdoc delete mode 100644 substrate/frame/support/procedural/src/pallet/expand/store_trait.rs delete mode 100644 substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs delete mode 100644 substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr delete mode 100644 substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs delete mode 100644 substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr delete mode 100644 substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs delete mode 100644 substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr diff --git a/prdoc/pr_3532.prdoc b/prdoc/pr_3532.prdoc new file mode 100644 index 00000000000..d47c8d1d491 --- /dev/null +++ b/prdoc/pr_3532.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove deprecated `trait Store` + +doc: + - audience: Runtime Dev + description: | + The deprecated `trait Store` feature has been removed from the codebase. Please remove usages of `generate_store` + macro from your pallets and access the storage through generics. For example, + `::StoredRange::mutate` will need to be updated to `StoredRange::::mutate`. + +crates: + - name: frame-support + - name: frame \ No newline at end of file diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 036da52a7ba..89e39a5c9bb 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -858,16 +858,6 @@ pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> pallet_macro_stub() } -/// -/// --- -/// -/// Rust-Analyzer Users: Documentation for this macro can be found at -/// `frame_support::pallet_macros::generate_store`. -#[proc_macro_attribute] -pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { - pallet_macro_stub() -} - /// /// --- /// diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 3da7d9293c7..067839c2846 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -31,7 +31,6 @@ mod instances; mod origin; mod pallet_struct; mod storage; -mod store_trait; mod tasks; mod tt_default_parts; mod type_value; @@ -68,7 +67,6 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let storages = storage::expand_storages(&mut def); let inherents = inherent::expand_inherents(&mut def); let instances = instances::expand_instances(&mut def); - let store_trait = store_trait::expand_store_trait(&mut def); let hooks = hooks::expand_hooks(&mut def); let genesis_build = genesis_build::expand_genesis_build(&mut def); let genesis_config = genesis_config::expand_genesis_config(&mut def); @@ -110,7 +108,6 @@ storage item. 
Otherwise, all storage items are listed among [*Type Definitions*] #storages #inherents #instances - #store_trait #hooks #genesis_build #genesis_config diff --git a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs b/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs deleted file mode 100644 index 6635adc9881..00000000000 --- a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs +++ /dev/null @@ -1,67 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::pallet::Def; -use syn::spanned::Spanned; - -/// If attribute `#[pallet::generate_store(..)]` is defined then: -/// * generate Store trait with all storages, -/// * implement Store trait for Pallet. -pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { - let (trait_vis, trait_store, attribute_span) = - if let Some(store) = &def.pallet_struct.store { store } else { return Default::default() }; - - let type_impl_gen = &def.type_impl_generics(trait_store.span()); - let type_use_gen = &def.type_use_generics(trait_store.span()); - let pallet_ident = &def.pallet_struct.pallet; - - let mut where_clauses = vec![&def.config.where_clause]; - where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); - let completed_where_clause = super::merge_where_clauses(&where_clauses); - - let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); - let storage_cfg_attrs = - &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); - let warnig_struct_name = syn::Ident::new("Store", *attribute_span); - let warning: syn::ItemStruct = syn::parse_quote!( - #[deprecated(note = r" - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. 
- Check https://github.com/paritytech/substrate/pull/13535 for more details.")] - struct #warnig_struct_name; - ); - - quote::quote_spanned!(trait_store.span() => - const _:() = { - #warning - const _: Option<#warnig_struct_name> = None; - }; - #trait_vis trait #trait_store { - #( - #(#storage_cfg_attrs)* - type #storage_names; - )* - } - impl<#type_impl_gen> #trait_store for #pallet_ident<#type_use_gen> - #completed_where_clause - { - #( - #(#storage_cfg_attrs)* - type #storage_names = #storage_names<#type_use_gen>; - )* - } - ) -} diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index e1efdbcc202..b55f130a93a 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -558,8 +558,6 @@ mod keyword { syn::custom_keyword!(validate_unsigned); syn::custom_keyword!(type_value); syn::custom_keyword!(pallet); - syn::custom_keyword!(generate_store); - syn::custom_keyword!(Store); syn::custom_keyword!(extra_constants); syn::custom_keyword!(composite_enum); } diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index c2855ae38ef..b645760998f 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -23,10 +23,8 @@ use syn::spanned::Spanned; mod keyword { syn::custom_keyword!(pallet); syn::custom_keyword!(Pallet); - syn::custom_keyword!(generate_store); syn::custom_keyword!(without_storage_info); syn::custom_keyword!(storage_version); - syn::custom_keyword!(Store); } /// Definition of the pallet pallet. @@ -37,8 +35,6 @@ pub struct PalletStructDef { pub instances: Vec, /// The keyword Pallet used (contains span). pub pallet: keyword::Pallet, - /// Whether the trait `Store` must be generated. - pub store: Option<(syn::Visibility, keyword::Store, proc_macro2::Span)>, /// The span of the pallet::pallet attribute. pub attr_span: proc_macro2::Span, /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. @@ -49,11 +45,9 @@ pub struct PalletStructDef { } /// Parse for one variant of: -/// * `#[pallet::generate_store($vis trait Store)]` /// * `#[pallet::without_storage_info]` /// * `#[pallet::storage_version(STORAGE_VERSION)]` pub enum PalletStructAttr { - GenerateStore { span: proc_macro2::Span, vis: syn::Visibility, keyword: keyword::Store }, WithoutStorageInfoTrait(proc_macro2::Span), StorageVersion { storage_version: syn::Path, span: proc_macro2::Span }, } @@ -61,9 +55,7 @@ pub enum PalletStructAttr { impl PalletStructAttr { fn span(&self) -> proc_macro2::Span { match self { - Self::GenerateStore { span, .. } | - Self::WithoutStorageInfoTrait(span) | - Self::StorageVersion { span, .. } => *span, + Self::WithoutStorageInfoTrait(span) | Self::StorageVersion { span, .. 
} => *span, } } } @@ -77,16 +69,7 @@ impl syn::parse::Parse for PalletStructAttr { content.parse::()?; let lookahead = content.lookahead1(); - if lookahead.peek(keyword::generate_store) { - content.parse::()?; - let generate_content; - syn::parenthesized!(generate_content in content); - let vis = generate_content.parse::()?; - generate_content.parse::()?; - let keyword = generate_content.parse::()?; - let span = content.span(); - Ok(Self::GenerateStore { vis, keyword, span }) - } else if lookahead.peek(keyword::without_storage_info) { + if lookahead.peek(keyword::without_storage_info) { let span = content.parse::()?.span(); Ok(Self::WithoutStorageInfoTrait(span)) } else if lookahead.peek(keyword::storage_version) { @@ -116,16 +99,12 @@ impl PalletStructDef { return Err(syn::Error::new(item.span(), msg)) }; - let mut store = None; let mut without_storage_info = None; let mut storage_version_found = None; let struct_attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; for attr in struct_attrs { match attr { - PalletStructAttr::GenerateStore { vis, keyword, span } if store.is_none() => { - store = Some((vis, keyword, span)); - }, PalletStructAttr::WithoutStorageInfoTrait(span) if without_storage_info.is_none() => { @@ -162,7 +141,6 @@ impl PalletStructDef { index, instances, pallet, - store, attr_span, without_storage_info, storage_version: storage_version_found, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 6ea7fa33e77..07560abd935 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -702,7 +702,7 @@ pub use frame_support_procedural::crate_to_crate_version; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()) + return Err($y.into()); }}; } @@ -949,7 +949,7 @@ pub mod pallet_prelude { /// pub trait Config: frame_system::Config {} /// } /// ``` -/// +// /// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. /// /// ## Macro expansion: @@ -1341,26 +1341,6 @@ pub mod pallet_macros { /// See [`pallet::storage`](`frame_support::pallet_macros::storage`) for more info. pub use frame_support_procedural::getter; - /// Allows generating the `Store` trait for all storages. - /// - /// DEPRECATED: Will be removed, do not use. - /// See for more details. - /// - /// To generate a `Store` trait associating all storages, annotate your `Pallet` struct - /// with the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: - /// - /// ```ignore - /// #[pallet::pallet] - /// #[pallet::generate_store(pub(super) trait Store)] - /// pub struct Pallet(_); - /// ``` - /// More precisely, the `Store` trait contains an associated type for each storage. It is - /// implemented for `Pallet` allowing access to the storage from pallet struct. - /// - /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using - /// `::Foo`. - pub use frame_support_procedural::generate_store; - /// Defines constants that are added to the constant field of /// [`PalletMetadata`](frame_metadata::v15::PalletMetadata) struct for this pallet. /// diff --git a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs b/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs deleted file mode 100644 index 72ad7896dfe..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.rs +++ /dev/null @@ -1,28 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[frame_support::pallet] -mod pallet { - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - #[pallet::generate_store(trait Store)] - pub struct Pallet(core::marker::PhantomData); -} - -fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr deleted file mode 100644 index e227033d364..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/deprecated_store_attr.stderr +++ /dev/null @@ -1,10 +0,0 @@ -error: use of deprecated struct `pallet::_::Store`: - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. - Check https://github.com/paritytech/substrate/pull/13535 for more details. - --> tests/pallet_ui/deprecated_store_attr.rs:24:3 - | -24 | #[pallet::generate_store(trait Store)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: `-D deprecated` implied by `-D warnings` - = help: to override `-D warnings` add `#[allow(deprecated)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs b/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs deleted file mode 100644 index 334fd8a46af..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs +++ /dev/null @@ -1,42 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::Hooks; - use frame_system::pallet_prelude::BlockNumberFor; - use frame_support::pallet_prelude::StorageValue; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - #[pallet::generate_store(trait Store)] - #[pallet::generate_store(trait Store)] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet {} - - #[pallet::storage] - type Foo = StorageValue<_, u8>; -} - -fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr deleted file mode 100644 index 864b399326e..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Unexpected duplicated attribute - --> tests/pallet_ui/duplicate_store_attr.rs:29:3 - | -29 | #[pallet::generate_store(trait Store)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr index 33a2d1da786..6f7f5617f7e 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/pallet_struct_invalid_attr.stderr @@ -1,4 +1,4 @@ -error: expected one of: `generate_store`, `without_storage_info`, `storage_version` +error: expected `without_storage_info` or `storage_version` --> tests/pallet_ui/pallet_struct_invalid_attr.rs:24:12 | 24 | #[pallet::generate_storage_info] // invalid diff --git a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs b/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs deleted file mode 100644 index 55dd315fb29..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs +++ /dev/null @@ -1,42 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#[frame_support::pallet] -mod pallet { - use frame_support::pallet_prelude::Hooks; - use frame_system::pallet_prelude::BlockNumberFor; - use frame_support::pallet_prelude::StorageValue; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - #[pallet::generate_store(pub trait Store)] - pub struct Pallet(core::marker::PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet {} - - #[pallet::storage] - type Foo = StorageValue<_, u8>; -} - -fn main() { -} diff --git a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr deleted file mode 100644 index ccb55122e81..00000000000 --- a/substrate/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr +++ /dev/null @@ -1,19 +0,0 @@ -error: use of deprecated struct `pallet::_::Store`: - Use of `#[pallet::generate_store(pub(super) trait Store)]` will be removed after July 2023. - Check https://github.com/paritytech/substrate/pull/13535 for more details. - --> tests/pallet_ui/store_trait_leak_private.rs:28:3 - | -28 | #[pallet::generate_store(pub trait Store)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: `-D deprecated` implied by `-D warnings` - = help: to override `-D warnings` add `#[allow(deprecated)]` - -error[E0446]: private type `_GeneratedPrefixForStorageFoo` in public interface - --> tests/pallet_ui/store_trait_leak_private.rs:28:37 - | -28 | #[pallet::generate_store(pub trait Store)] - | ^^^^^ can't leak private type -... -37 | #[pallet::storage] - | ------- `_GeneratedPrefixForStorageFoo` declared as private diff --git a/substrate/frame/support/test/tests/storage_transaction.rs b/substrate/frame/support/test/tests/storage_transaction.rs index c4774330860..f1489a8b0a6 100644 --- a/substrate/frame/support/test/tests/storage_transaction.rs +++ b/substrate/frame/support/test/tests/storage_transaction.rs @@ -41,7 +41,6 @@ pub mod pallet { use frame_system::pallet_prelude::*; #[pallet::pallet] - #[pallet::generate_store(pub (super) trait Store)] pub struct Pallet(_); #[pallet::config] -- GitLab From 7839400f1203b1c1de2ff84db5f5ffcf31e20d87 Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Tue, 12 Mar 2024 00:12:58 +0200 Subject: [PATCH 80/86] [Collator Selection] Fix weight refund for `set_candidacy_bond` (#3643) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #3642 This PR implements the weight refund of `pallet_collator_selection::set_candidacy_bond` to account for no iterations when the bond is decreased. 
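The refund hinges on a single expression: the list-length benchmark parameter is only charged when the bond goes up. A tiny standalone illustration of the idiom (the numbers are made up):

```rust
fn main() {
    let initial_len: u32 = 50; // pretend 50 candidates are currently registered

    // Bond decreased: the candidate list is not iterated, so the extrinsic is
    // refunded down to the weight of `set_candidacy_bond(0, kicked)`.
    let bond_increased = false;
    assert_eq!(bond_increased.then(|| initial_len).unwrap_or_default(), 0);

    // Bond increased: every candidate may be re-checked (and possibly kicked),
    // so the full `set_candidacy_bond(initial_len, kicked)` weight is kept.
    let bond_increased = true;
    assert_eq!(bond_increased.then(|| initial_len).unwrap_or_default(), 50);
}
```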
--------- Signed-off-by: georgepisaltu Co-authored-by: Bastian KΓΆcher --- cumulus/pallets/collator-selection/src/lib.rs | 6 +++++- prdoc/pr_3643.prdoc | 12 ++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_3643.prdoc diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index fb4c4a445df..62c00737f91 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -495,7 +495,11 @@ pub mod pallet { }) .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) + Ok(Some(T::WeightInfo::set_candidacy_bond( + bond_increased.then(|| initial_len as u32).unwrap_or_default(), + kicked as u32, + )) + .into()) } /// Register this account as a collator candidate. The account must (a) already have diff --git a/prdoc/pr_3643.prdoc b/prdoc/pr_3643.prdoc new file mode 100644 index 00000000000..6e85af8d99a --- /dev/null +++ b/prdoc/pr_3643.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix weight refund for `pallet_collator_selection::set_candidacy_bond` + +doc: + - audience: Runtime Dev + description: | + This PR implements the weight refund of `pallet_collator_selection::set_candidacy_bond` to + account for no iterations over the candidate list when the candidacy bond is decreased. +crates: + - name: pallet-collator-selection -- GitLab From 7a644fa082f09b557a8a198ed56412f02062bbcf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Mar 2024 23:20:48 +0100 Subject: [PATCH 81/86] Bump handlebars from 4.3.7 to 5.1.0 (#3248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [handlebars](https://github.com/sunng87/handlebars-rust) from 4.3.7 to 5.1.0.
Release notes

Sourced from handlebars's releases.

v5.1.0

What's Changed

New Contributors

Full Changelog: https://github.com/sunng87/handlebars-rust/compare/v5.0.0...v5.1.0

v5.0.0

5.0.0

A semver major release that introduces some API breaking changes.

Highlights

  • RenderError has been rewritten for typed error reason. In previous versions we use string message for RenderError which is impossible to handle with code. This version introduces RenderErrorReason so you can use match to deal various error reasons.
  • Lifetime in Helper trait has been simplified.

Changes compared to 4.3

  • [Added] public mutable access to local variables in BlockContext #533
  • [Changed] Simplified lifetime specifiers for Helper, ScopedJson and some other related types and functions. #532
  • [Changed] Updated TemplateError to reduce its size. Direct field access is removed in favor of access methods
  • [Changed] Introducing RenderErrorReason for typed render error
  • [Changed] Changed register_template_directory api for more customizations #[610]
  • [Changed] Updated rust-embed to 8.0

Collaboration Wanted

I'm looking for collaborations to join the development with me on this project. Contact via email if your are interested in.

Auto-generated changelog

... (truncated)

Changelog

Sourced from handlebars's changelog.

5.1.0 - 2024-01-17

  • [Added] Chained else if block support #629

5.0.0 - 2023-12-31

  • [Added] public mutable access to local variables in BlockContext #533
  • [Changed] Simplified lifetime specifiers for Helper, ScopedJson and some other related types and functions. #532
  • [Changed] Updated TemplateError to reduce its size. Direct field access is removed in favor of access methods
  • [Changed] Introducing RenderErrorReason for typed render error
  • [Changed] Changed register_template_directory api for more customizations #[610]
  • [Changed] Updated rust-embed to 8.0

4.3.4 - 2022-09-11

  • [Added] New write_fmt function for Output #522
  • [Added] reason() method for TemplateError to access underlying reason, this replaces original direct .reason access.
  • [Changed] Direct access to TemplateError's reason field is depreacted will be removed in future.

4.3.3 - 2022-07-20

  • [Fixed] Disable partial expression indentation with {{~> partial}} to bring behavior closer in line with original javascript version. #518
  • [Fixed] Support for using partial context together with partial parameters #520

4.3.2 - 2022-07-14

  • [Added] Render functions that reuse Context for custom std::io::Write: render_with_context_to_write and render_template_with_context_to_write

4.3.1 - 2022-06-09

  • [Added] Added support for {{~{variable}~}} syntax #509

4.3.0 - 2022-05-18

  • [Changed] update MSRV to 1.57 as rhai requires
  • [Fixed] Reimplemented indent support for partial expression {{> partial}}, which is introduced in 4.2.0. The new implementation is aligned with original javascript version, that every text line generated from partial are indented as {{> partial}} does. prevent_indent will turn-off this feature. #505
  • [Changed] changed error support library from quick_error to thiserror

... (truncated)

Commits
  • d8d9a78 chore: Release handlebars version 5.1.0
  • 137bce5 chore: minor cleanup for chained else support in #629
  • e30d8ab Merge pull request #629 from progmboy/else_chain
  • 8f16353 format code
  • 786d132 add else chain support
  • 23672e8 Merge pull request #628 from sunng87/dependabot/npm_and_yarn/playground/www/f...
  • b849efd chore(deps-dev): bump follow-redirects in /playground/www
  • 7071c9d test: add test for error reason
  • 4664a34 (cargo-release) version 5.0.0
  • ca27748 Merge pull request #625 from sunng87/refactor/render-error-reason-2
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=handlebars&package-manager=cargo&previous-version=4.3.7&new-version=5.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Bastian KΓΆcher --- Cargo.lock | 4 ++-- substrate/utils/frame/benchmarking-cli/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b7ea71a536..7eaf3e44a1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6256,9 +6256,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.3.7" +version = "5.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" +checksum = "ab283476b99e66691dee3f1640fea91487a8d81f50fb5ecc75538f8f8879a1e4" dependencies = [ "log", "pest", diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index a2db83052c5..a25ab3a8f5b 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -21,7 +21,7 @@ chrono = "0.4" clap = { version = "4.5.1", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } comfy-table = { version = "7.1.0", default-features = false } -handlebars = "4.2.2" +handlebars = "5.1.0" Inflector = "0.11.4" itertools = "0.10.3" lazy_static = "1.4.0" -- GitLab From 7315a9b8fdc18848025a83f18c643573d9be9e1e Mon Sep 17 00:00:00 2001 From: gupnik Date: Tue, 12 Mar 2024 10:12:58 +0530 Subject: [PATCH 82/86] Adds default config for assets pallet (#3637) Step in https://github.com/paritytech/polkadot-sdk/issues/171 --- substrate/frame/assets/src/lib.rs | 43 +++++++++++++++++++++++++++++- substrate/frame/assets/src/mock.rs | 16 +---------- 2 files changed, 43 insertions(+), 16 deletions(-) diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 60316f8cdcf..09d59ae1b8b 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -226,10 +226,42 @@ pub mod pallet { } } - #[pallet::config] + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, traits::ConstU64}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeEvent = (); + type Balance = u64; + type RemoveItemsLimit = ConstU32<5>; + type AssetId = u32; + type AssetIdParameter = u32; + type AssetDeposit = ConstU64<1>; + type AssetAccountDeposit = ConstU64<10>; + type MetadataDepositBase = ConstU64<1>; + type MetadataDepositPerByte = ConstU64<1>; + type ApprovalDeposit = ConstU64<1>; + type StringLimit = ConstU32<50>; + type Extra = (); + type CallbackHandle = (); + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); + } + } + + #[pallet::config(with_default)] /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. 
+ #[pallet::no_default_bounds] type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -262,10 +294,12 @@ pub mod pallet { type AssetIdParameter: Parameter + From + Into + MaxEncodedLen; /// The currency mechanism. + #[pallet::no_default] type Currency: ReservableCurrency; /// Standard asset class creation is only allowed if the origin attempting it and the /// asset class are in this set. + #[pallet::no_default] type CreateOrigin: EnsureOriginWithArg< Self::RuntimeOrigin, Self::AssetId, @@ -274,28 +308,34 @@ pub mod pallet { /// The origin which may forcibly create or destroy an asset or otherwise alter privileged /// attributes. + #[pallet::no_default] type ForceOrigin: EnsureOrigin; /// The basic amount of funds that must be reserved for an asset. #[pallet::constant] + #[pallet::no_default_bounds] type AssetDeposit: Get>; /// The amount of funds that must be reserved for a non-provider asset account to be /// maintained. #[pallet::constant] + #[pallet::no_default_bounds] type AssetAccountDeposit: Get>; /// The basic amount of funds that must be reserved when adding metadata to your asset. #[pallet::constant] + #[pallet::no_default_bounds] type MetadataDepositBase: Get>; /// The additional funds that must be reserved for the number of bytes you store in your /// metadata. #[pallet::constant] + #[pallet::no_default_bounds] type MetadataDepositPerByte: Get>; /// The amount of funds that must be reserved when creating a new approval. #[pallet::constant] + #[pallet::no_default_bounds] type ApprovalDeposit: Get>; /// The maximum length of a name or symbol stored on-chain. @@ -304,6 +344,7 @@ pub mod pallet { /// A hook to allow a per-asset, per-account minimum balance to be enforced. This must be /// respected in all permissionless operations. + #[pallet::no_default] type Freezer: FrozenBalance; /// Additional data to be stored with an account's asset balance. diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index e1722200c35..906febb0214 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -108,27 +108,13 @@ impl AssetsCallbackHandle { } } +#[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Balance = u64; - type AssetId = u32; - type AssetIdParameter = u32; type Currency = Balances; type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; - type AssetDeposit = ConstU64<1>; - type AssetAccountDeposit = ConstU64<10>; - type MetadataDepositBase = ConstU64<1>; - type MetadataDepositPerByte = ConstU64<1>; - type ApprovalDeposit = ConstU64<1>; - type StringLimit = ConstU32<50>; type Freezer = TestFreezer; - type WeightInfo = (); type CallbackHandle = AssetsCallbackHandle; - type Extra = (); - type RemoveItemsLimit = ConstU32<5>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); } use std::collections::HashMap; -- GitLab From b0f34e4b2953f814254f92c8d151f22ffcf9c034 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 12 Mar 2024 14:23:06 +0900 Subject: [PATCH 83/86] Add a PolkaVM-based executor (#3458) This PR adds a new PolkaVM-based executor to Substrate. - The executor can now be used to actually run a PolkaVM-based runtime, and successfully produces blocks. - The executor is always compiled-in, but is disabled by default. 
- The `SUBSTRATE_ENABLE_POLKAVM` environment variable must be set to `1` to enable the executor, in which case the node will accept both WASM and PolkaVM program blobs (otherwise it'll default to WASM-only). This is deliberately undocumented and not explicitly exposed anywhere (e.g. in the command line arguments, or in the API) to disincentivize anyone from enabling it in production. If/when we'll move this into production usage I'll remove the environment variable and do it "properly". - I did not use our legacy runtime allocator for the PolkaVM executor, so currently every allocation inside of the runtime will leak guest memory until that particular instance is destroyed. The idea here is that I will work on the https://github.com/polkadot-fellows/RFCs/pull/4 which will remove the need for the legacy allocator under WASM, and that will also allow us to use a proper non-leaking allocator under PolkaVM. - I also did some minor cleanups of the WASM executor and deleted some dead code. No prdocs included since this is not intended to be an end-user feature, but an unofficial experiment, and shouldn't affect any current production user. Once this is production-ready a full Polkadot Fellowship RFC will be necessary anyway. --- Cargo.lock | 74 ++++- Cargo.toml | 6 +- .../core/pvf/common/src/executor_interface.rs | 4 +- substrate/client/executor/Cargo.toml | 1 + substrate/client/executor/common/Cargo.toml | 1 + substrate/client/executor/common/src/error.rs | 18 ++ substrate/client/executor/common/src/lib.rs | 4 + .../common/src/runtime_blob/runtime_blob.rs | 153 +++++----- .../executor/common/src/wasm_runtime.rs | 50 +--- substrate/client/executor/polkavm/Cargo.toml | 23 ++ substrate/client/executor/polkavm/README.md | 1 + substrate/client/executor/polkavm/src/lib.rs | 261 ++++++++++++++++++ substrate/client/executor/src/wasm_runtime.rs | 4 + .../executor/wasmtime/src/instance_wrapper.rs | 148 ++-------- .../client/executor/wasmtime/src/runtime.rs | 19 +- substrate/primitives/io/Cargo.toml | 3 + .../primitives/io/src/global_alloc_riscv.rs | 19 ++ .../primitives/io/src/global_alloc_wasm.rs | 34 +++ substrate/primitives/io/src/lib.rs | 34 +-- .../utils/wasm-builder/src/wasm_project.rs | 2 +- 20 files changed, 559 insertions(+), 300 deletions(-) create mode 100644 substrate/client/executor/polkavm/Cargo.toml create mode 100644 substrate/client/executor/polkavm/README.md create mode 100644 substrate/client/executor/polkavm/src/lib.rs create mode 100644 substrate/primitives/io/src/global_alloc_riscv.rs create mode 100644 substrate/primitives/io/src/global_alloc_wasm.rs diff --git a/Cargo.lock b/Cargo.lock index 7eaf3e44a1b..5eab3cf7c24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13805,6 +13805,28 @@ dependencies = [ "westend-runtime", ] +[[package]] +name = "polkavm" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a3693e5efdb2bf74e449cd25fd777a28bd7ed87e41f5d5da75eb31b4de48b94" +dependencies = [ + "libc", + "log", + "polkavm-assembler", + "polkavm-common 0.9.0", + "polkavm-linux-raw", +] + +[[package]] +name = "polkavm-assembler" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa96d6d868243acc12de813dd48e756cbadcc8e13964c70d272753266deadc1" +dependencies = [ + "log", +] + [[package]] name = "polkavm-common" version = "0.5.0" @@ -13813,9 +13835,12 @@ checksum = "88b4e215c80fe876147f3d58158d5dfeae7dabdd6047e175af77095b78d0035c" [[package]] name = "polkavm-common" -version = "0.8.0" +version = "0.9.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c99f7eee94e7be43ba37eef65ad0ee8cbaf89b7c00001c3f6d2be985cb1817" +checksum = "1d9428a5cfcc85c5d7b9fc4b6a18c4b802d0173d768182a51cc7751640f08b92" +dependencies = [ + "log", +] [[package]] name = "polkavm-derive" @@ -13829,9 +13854,9 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79fa916f7962348bd1bb1a65a83401675e6fc86c51a0fdbcf92a3108e58e6125" +checksum = "ae8c4bea6f3e11cd89bb18bcdddac10bd9a24015399bd1c485ad68a985a19606" dependencies = [ "polkavm-derive-impl-macro", ] @@ -13850,11 +13875,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c10b2654a8a10a83c260bfb93e97b262cf0017494ab94a65d389e0eda6de6c9c" +checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ - "polkavm-common 0.8.0", + "polkavm-common 0.9.0", "proc-macro2", "quote", "syn 2.0.50", @@ -13862,11 +13887,11 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" +checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ - "polkavm-derive-impl 0.8.0", + "polkavm-derive-impl 0.9.0", "syn 2.0.50", ] @@ -13887,19 +13912,25 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.8.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdec1451cb18261d5d01de82acc15305e417fb59588cdcb3127d3dcc9672b925" +checksum = "9c7be503e60cf56c0eb785f90aaba4b583b36bff00e93997d93fef97f9553c39" dependencies = [ "gimli 0.28.0", "hashbrown 0.14.3", "log", "object 0.32.2", - "polkavm-common 0.8.0", + "polkavm-common 0.9.0", "regalloc2 0.9.3", "rustc-demangle", ] +[[package]] +name = "polkavm-linux-raw" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" + [[package]] name = "polling" version = "2.8.0" @@ -16112,6 +16143,7 @@ dependencies = [ "paste", "regex", "sc-executor-common", + "sc-executor-polkavm", "sc-executor-wasmtime", "sc-runtime-test", "sc-tracing", @@ -16141,6 +16173,7 @@ dependencies = [ name = "sc-executor-common" version = "0.29.0" dependencies = [ + "polkavm", "sc-allocator", "sp-maybe-compressed-blob", "sp-wasm-interface 20.0.0", @@ -16148,6 +16181,16 @@ dependencies = [ "wasm-instrument", ] +[[package]] +name = "sc-executor-polkavm" +version = "0.29.0" +dependencies = [ + "log", + "polkavm", + "sc-executor-common", + "sp-wasm-interface 20.0.0", +] + [[package]] name = "sc-executor-wasmtime" version = "0.29.0" @@ -18781,6 +18824,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", + "polkavm-derive 0.9.1", "rustversion", "secp256k1", "sp-core", @@ -18973,7 +19017,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.8.0", + "polkavm-derive 0.9.1", "primitive-types", "rustversion", "sp-core", @@ -19995,7 +20039,7 @@ dependencies = [ "console", "filetime", "parity-wasm", - "polkavm-linker 0.8.2", + "polkavm-linker 0.9.2", "sp-maybe-compressed-blob", "strum 0.24.1", "tempfile", diff --git a/Cargo.toml b/Cargo.toml index 
80068596685..f256d02808a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -249,6 +249,7 @@ members = [ "substrate/client/db", "substrate/client/executor", "substrate/client/executor/common", + "substrate/client/executor/polkavm", "substrate/client/executor/runtime-test", "substrate/client/executor/wasmtime", "substrate/client/informant", @@ -543,8 +544,9 @@ extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic [workspace.dependencies] -polkavm-linker = "0.8.2" -polkavm-derive = "0.8.0" +polkavm = "0.9.3" +polkavm-linker = "0.9.2" +polkavm-derive = "0.9.1" log = { version = "0.4.20", default-features = false } quote = { version = "1.0.33" } serde = { version = "1.0.197", default-features = false } diff --git a/polkadot/node/core/pvf/common/src/executor_interface.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs index b69a87890f6..252e611db8a 100644 --- a/polkadot/node/core/pvf/common/src/executor_interface.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -24,7 +24,7 @@ use polkadot_primitives::{ use sc_executor_common::{ error::WasmError, runtime_blob::RuntimeBlob, - wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmModule as _}, + wasm_runtime::{HeapAllocStrategy, WasmModule as _}, }; use sc_executor_wasmtime::{Config, DeterministicStackLimit, Semantics, WasmtimeRuntime}; use sp_core::storage::{ChildInfo, TrackedStorageKey}; @@ -119,7 +119,7 @@ pub unsafe fn execute_artifact( match sc_executor::with_externalities_safe(&mut ext, || { let runtime = create_runtime_from_artifact_bytes(compiled_artifact_blob, executor_params)?; - runtime.new_instance()?.call(InvokeMethod::Export("validate_block"), params) + runtime.new_instance()?.call("validate_block", params) }) { Ok(Ok(ok)) => Ok(ok), Ok(Err(err)) | Err(err) => Err(err), diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 4e36de3c4f4..7fad7e3a2a0 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -23,6 +23,7 @@ tracing = "0.1.29" codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor-common = { path = "common" } +sc-executor-polkavm = { path = "polkavm" } sc-executor-wasmtime = { path = "wasmtime" } sp-api = { path = "../../primitives/api" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index bfd1fa6b740..8ff34c3709a 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -22,6 +22,7 @@ wasm-instrument = "0.4" sc-allocator = { path = "../../allocator" } sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" } sp-wasm-interface = { path = "../../../primitives/wasm-interface" } +polkavm = { workspace = true } [features] default = [] diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index 2a0dc364b41..b7c7e2b7019 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -150,6 +150,24 @@ pub enum WasmError { Other(String), } +impl From for WasmError { + fn from(error: polkavm::ProgramParseError) -> Self { + WasmError::Other(error.to_string()) + } +} + +impl From for WasmError { + fn from(error: polkavm::Error) -> Self { + WasmError::Other(error.to_string()) + } +} + +impl From for Error { + fn from(error: polkavm::Error) 
-> Self { + Error::Other(error.to_string()) + } +} + /// An error message with an attached backtrace. #[derive(Debug)] pub struct MessageWithBacktrace { diff --git a/substrate/client/executor/common/src/lib.rs b/substrate/client/executor/common/src/lib.rs index 751801fb30d..ff9fab2c888 100644 --- a/substrate/client/executor/common/src/lib.rs +++ b/substrate/client/executor/common/src/lib.rs @@ -25,3 +25,7 @@ pub mod error; pub mod runtime_blob; pub mod util; pub mod wasm_runtime; + +pub(crate) fn is_polkavm_enabled() -> bool { + std::env::var_os("SUBSTRATE_ENABLE_POLKAVM").map_or(false, |value| value == "1") +} diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index becf9e219b0..d689083b2f8 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -17,23 +17,23 @@ // along with this program. If not, see . use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; -use wasm_instrument::{ - export_mutable_globals, - parity_wasm::elements::{ - deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, - Module, Section, - }, +use wasm_instrument::parity_wasm::elements::{ + deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, + Module, Section, }; -/// A bunch of information collected from a WebAssembly module. +/// A program blob containing a Substrate runtime. #[derive(Clone)] -pub struct RuntimeBlob { - raw_module: Module, +pub struct RuntimeBlob(BlobKind); + +#[derive(Clone)] +enum BlobKind { + WebAssembly(Module), + PolkaVM(polkavm::ProgramBlob<'static>), } impl RuntimeBlob { - /// Create `RuntimeBlob` from the given wasm code. Will attempt to decompress the code before - /// deserializing it. + /// Create `RuntimeBlob` from the given WASM or PolkaVM compressed program blob. /// /// See [`sp_maybe_compressed_blob`] for details about decompression. pub fn uncompress_if_needed(wasm_code: &[u8]) -> Result { @@ -43,31 +43,26 @@ impl RuntimeBlob { Self::new(&wasm_code) } - /// Create `RuntimeBlob` from the given wasm code. + /// Create `RuntimeBlob` from the given WASM or PolkaVM program blob. /// - /// Returns `Err` if the wasm code cannot be deserialized. - pub fn new(wasm_code: &[u8]) -> Result { - let raw_module: Module = deserialize_buffer(wasm_code) - .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?; - Ok(Self { raw_module }) - } - - /// The number of globals defined in locally in this module. - pub fn declared_globals_count(&self) -> u32 { - self.raw_module - .global_section() - .map(|gs| gs.entries().len() as u32) - .unwrap_or(0) - } - - /// The number of imports of globals. - pub fn imported_globals_count(&self) -> u32 { - self.raw_module.import_section().map(|is| is.globals() as u32).unwrap_or(0) - } + /// Returns `Err` if the blob cannot be deserialized. + /// + /// Will only accept a PolkaVM program if the `SUBSTRATE_ENABLE_POLKAVM` environment + /// variable is set to `1`. 
+ pub fn new(raw_blob: &[u8]) -> Result { + if raw_blob.starts_with(b"PVM\0") { + if crate::is_polkavm_enabled() { + return Ok(Self(BlobKind::PolkaVM( + polkavm::ProgramBlob::parse(raw_blob)?.into_owned(), + ))); + } else { + return Err(WasmError::Other("expected a WASM runtime blob, found a PolkaVM runtime blob; set the 'SUBSTRATE_ENABLE_POLKAVM' environment variable to enable the experimental PolkaVM-based executor".to_string())); + } + } - /// Perform an instrumentation that makes sure that the mutable globals are exported. - pub fn expose_mutable_globals(&mut self) { - export_mutable_globals(&mut self.raw_module, "exported_internal_global"); + let raw_module: Module = deserialize_buffer(raw_blob) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?; + Ok(Self(BlobKind::WebAssembly(raw_module))) } /// Run a pass that instrument this module so as to introduce a deterministic stack height @@ -80,26 +75,16 @@ impl RuntimeBlob { /// /// The stack cost of a function is computed based on how much locals there are and the maximum /// depth of the wasm operand stack. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn inject_stack_depth_metering(self, stack_depth_limit: u32) -> Result { let injected_module = - wasm_instrument::inject_stack_limiter(self.raw_module, stack_depth_limit).map_err( - |e| WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)), - )?; - - Ok(Self { raw_module: injected_module }) - } + wasm_instrument::inject_stack_limiter(self.into_webassembly_blob()?, stack_depth_limit) + .map_err(|e| { + WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)) + })?; - /// Perform an instrumentation that makes sure that a specific function `entry_point` is - /// exported - pub fn entry_point_exists(&self, entry_point: &str) -> bool { - self.raw_module - .export_section() - .map(|e| { - e.entries().iter().any(|e| { - matches!(e.internal(), Internal::Function(_)) && e.field() == entry_point - }) - }) - .unwrap_or_default() + Ok(Self(BlobKind::WebAssembly(injected_module))) } /// Converts a WASM memory import into a memory section and exports it. @@ -107,8 +92,11 @@ impl RuntimeBlob { /// Does nothing if there's no memory import. /// /// May return an error in case the WASM module is invalid. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn convert_memory_import_into_export(&mut self) -> Result<(), WasmError> { - let import_section = match self.raw_module.import_section_mut() { + let raw_module = self.as_webassembly_blob_mut()?; + let import_section = match raw_module.import_section_mut() { Some(import_section) => import_section, None => return Ok(()), }; @@ -124,7 +112,7 @@ impl RuntimeBlob { let memory_name = entry.field().to_owned(); import_entries.remove(index); - self.raw_module + raw_module .insert_section(Section::Memory(MemorySection::with_entries(vec![memory_ty]))) .map_err(|error| { WasmError::Other(format!( @@ -133,14 +121,14 @@ impl RuntimeBlob { )) })?; - if self.raw_module.export_section_mut().is_none() { + if raw_module.export_section_mut().is_none() { // A module without an export section is somewhat unrealistic, but let's do this // just in case to cover all of our bases. 
- self.raw_module + raw_module .insert_section(Section::Export(Default::default())) .expect("an export section can be always inserted if it doesn't exist; qed"); } - self.raw_module + raw_module .export_section_mut() .expect("export section already existed or we just added it above, so it always exists; qed") .entries_mut() @@ -156,12 +144,14 @@ impl RuntimeBlob { /// /// Will return an error in case there is no memory section present, /// or if the memory section is empty. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn setup_memory_according_to_heap_alloc_strategy( &mut self, heap_alloc_strategy: HeapAllocStrategy, ) -> Result<(), WasmError> { - let memory_section = self - .raw_module + let raw_module = self.as_webassembly_blob_mut()?; + let memory_section = raw_module .memory_section_mut() .ok_or_else(|| WasmError::Other("no memory section found".into()))?; @@ -187,8 +177,11 @@ impl RuntimeBlob { /// Scans the wasm blob for the first section with the name that matches the given. Returns the /// contents of the custom section if found or `None` otherwise. + /// + /// Only valid for WASM programs; will return an error if the blob is a PolkaVM program. pub fn custom_section_contents(&self, section_name: &str) -> Option<&[u8]> { - self.raw_module + self.as_webassembly_blob() + .ok()? .custom_sections() .find(|cs| cs.name() == section_name) .map(|cs| cs.payload()) @@ -196,11 +189,45 @@ impl RuntimeBlob { /// Consumes this runtime blob and serializes it. pub fn serialize(self) -> Vec { - serialize(self.raw_module).expect("serializing into a vec should succeed; qed") + match self.0 { + BlobKind::WebAssembly(raw_module) => + serialize(raw_module).expect("serializing into a vec should succeed; qed"), + BlobKind::PolkaVM(ref blob) => blob.as_bytes().to_vec(), + } + } + + fn as_webassembly_blob(&self) -> Result<&Module, WasmError> { + match self.0 { + BlobKind::WebAssembly(ref raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } } - /// Destructure this structure into the underlying parity-wasm Module. - pub fn into_inner(self) -> Module { - self.raw_module + fn as_webassembly_blob_mut(&mut self) -> Result<&mut Module, WasmError> { + match self.0 { + BlobKind::WebAssembly(ref mut raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } + } + + fn into_webassembly_blob(self) -> Result { + match self.0 { + BlobKind::WebAssembly(raw_module) => Ok(raw_module), + BlobKind::PolkaVM(..) => Err(WasmError::Other( + "expected a WebAssembly program; found a PolkaVM program blob".into(), + )), + } + } + + /// Gets a reference to the inner PolkaVM program blob, if this is a PolkaVM program. + pub fn as_polkavm_blob(&self) -> Option<&polkavm::ProgramBlob> { + match self.0 { + BlobKind::WebAssembly(..) => None, + BlobKind::PolkaVM(ref blob) => Some(blob), + } } } diff --git a/substrate/client/executor/common/src/wasm_runtime.rs b/substrate/client/executor/common/src/wasm_runtime.rs index d8e142b9d55..e8f429a3dbb 100644 --- a/substrate/client/executor/common/src/wasm_runtime.rs +++ b/substrate/client/executor/common/src/wasm_runtime.rs @@ -19,7 +19,6 @@ //! Definitions for a wasm runtime. 
use crate::error::Error; -use sp_wasm_interface::Value; pub use sc_allocator::AllocationStats; @@ -30,46 +29,6 @@ pub const DEFAULT_HEAP_ALLOC_STRATEGY: HeapAllocStrategy = /// Default heap allocation pages. pub const DEFAULT_HEAP_ALLOC_PAGES: u32 = 2048; -/// A method to be used to find the entrypoint when calling into the runtime -/// -/// Contains variants on how to resolve wasm function that will be invoked. -pub enum InvokeMethod<'a> { - /// Call function exported with this name. - /// - /// Located function should have (u32, u32) -> u64 signature. - Export(&'a str), - /// Call a function found in the exported table found under the given index. - /// - /// Located function should have (u32, u32) -> u64 signature. - Table(u32), - /// Call function by reference from table through a wrapper. - /// - /// Invoked function (`dispatcher_ref`) function - /// should have (u32, u32, u32) -> u64 signature. - /// - /// `func` will be passed to the invoked function as a first argument. - TableWithWrapper { - /// Wrapper for the call. - /// - /// Function pointer, index into runtime exported table. - dispatcher_ref: u32, - /// Extra argument for dispatch. - /// - /// Common usage would be to use it as an actual wasm function pointer - /// that should be invoked, but can be used as any extra argument on the - /// callee side. - /// - /// This is typically generated and invoked by the runtime itself. - func: u32, - }, -} - -impl<'a> From<&'a str> for InvokeMethod<'a> { - fn from(val: &'a str) -> InvokeMethod<'a> { - InvokeMethod::Export(val) - } -} - /// A trait that defines an abstract WASM runtime module. /// /// This can be implemented by an execution engine. @@ -87,7 +46,7 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + fn call(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call_with_allocation_stats(method, data).0 } @@ -98,7 +57,7 @@ pub trait WasmInstance: Send { /// Returns the encoded result on success. fn call_with_allocation_stats( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], ) -> (Result, Error>, Option); @@ -110,11 +69,6 @@ pub trait WasmInstance: Send { fn call_export(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call(method.into(), data) } - - /// Get the value from a global with the given `name`. - /// - /// This method is only suitable for getting immutable globals. - fn get_global_const(&mut self, name: &str) -> Result, Error>; } /// Defines the heap pages allocation strategy the wasm runtime should use. 
diff --git a/substrate/client/executor/polkavm/Cargo.toml b/substrate/client/executor/polkavm/Cargo.toml new file mode 100644 index 00000000000..9d0eb8ccf0e --- /dev/null +++ b/substrate/client/executor/polkavm/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "sc-executor-polkavm" +version = "0.29.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "PolkaVM executor for Substrate" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = { workspace = true } +polkavm = { workspace = true } + +sc-executor-common = { path = "../common" } +sp-wasm-interface = { path = "../../../primitives/wasm-interface" } diff --git a/substrate/client/executor/polkavm/README.md b/substrate/client/executor/polkavm/README.md new file mode 100644 index 00000000000..64fc2fa0c28 --- /dev/null +++ b/substrate/client/executor/polkavm/README.md @@ -0,0 +1 @@ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/substrate/client/executor/polkavm/src/lib.rs b/substrate/client/executor/polkavm/src/lib.rs new file mode 100644 index 00000000000..1bd72eb33d3 --- /dev/null +++ b/substrate/client/executor/polkavm/src/lib.rs @@ -0,0 +1,261 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use polkavm::{Caller, Reg}; +use sc_executor_common::{ + error::{Error, WasmError}, + wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, +}; +use sp_wasm_interface::{ + Function, FunctionContext, HostFunctions, Pointer, Value, ValueType, WordSize, +}; + +#[repr(transparent)] +pub struct InstancePre(polkavm::InstancePre<()>); + +#[repr(transparent)] +pub struct Instance(polkavm::Instance<()>); + +impl WasmModule for InstancePre { + fn new_instance(&self) -> Result, Error> { + Ok(Box::new(Instance(self.0.instantiate()?))) + } +} + +impl WasmInstance for Instance { + fn call_with_allocation_stats( + &mut self, + name: &str, + raw_data: &[u8], + ) -> (Result, Error>, Option) { + let Some(method_index) = self.0.module().lookup_export(name) else { + return ( + Err(format!("cannot call into the runtime: export not found: '{name}'").into()), + None, + ); + }; + + let Ok(raw_data_length) = u32::try_from(raw_data.len()) else { + return ( + Err(format!("cannot call runtime method '{name}': input payload is too big").into()), + None, + ); + }; + + // TODO: This will leak guest memory; find a better solution. + let mut state_args = polkavm::StateArgs::new(); + + // Make sure the memory is cleared... + state_args.reset_memory(true); + // ...and allocate space for the input payload. 
+ state_args.sbrk(raw_data_length); + + match self.0.update_state(state_args) { + Ok(()) => {}, + Err(polkavm::ExecutionError::Trap(trap)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {trap}").into()), None); + }, + Err(polkavm::ExecutionError::Error(error)) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {error}").into()), None); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + } + + // Grab the address of where the guest's heap starts; that's where we've just allocated + // the memory for the input payload. + let data_pointer = self.0.module().memory_map().heap_base(); + + if let Err(error) = self.0.write_memory(data_pointer, raw_data) { + return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {error}").into()), None); + } + + let mut state = (); + let mut call_args = polkavm::CallArgs::new(&mut state, method_index); + call_args.args_untyped(&[data_pointer, raw_data_length]); + + match self.0.call(Default::default(), call_args) { + Ok(()) => {}, + Err(polkavm::ExecutionError::Trap(trap)) => { + return ( + Err(format!("call into the runtime method '{name}' failed: {trap}").into()), + None, + ); + }, + Err(polkavm::ExecutionError::Error(error)) => { + return ( + Err(format!("call into the runtime method '{name}' failed: {error}").into()), + None, + ); + }, + Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + } + + let result_pointer = self.0.get_reg(Reg::A0); + let result_length = self.0.get_reg(Reg::A1); + let output = match self.0.read_memory_into_vec(result_pointer, result_length) { + Ok(output) => output, + Err(error) => { + return (Err(format!("call into the runtime method '{name}' failed: failed to read the return payload: {error}").into()), None) + }, + }; + + (Ok(output), None) + } +} + +struct Context<'r, 'a>(&'r mut polkavm::Caller<'a, ()>); + +impl<'r, 'a> FunctionContext for Context<'r, 'a> { + fn read_memory_into( + &self, + address: Pointer, + dest: &mut [u8], + ) -> sp_wasm_interface::Result<()> { + self.0 + .read_memory_into_slice(u32::from(address), dest) + .map_err(|error| error.to_string()) + .map(|_| ()) + } + + fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { + self.0.write_memory(u32::from(address), data).map_err(|error| error.to_string()) + } + + fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { + let pointer = self.0.sbrk(0).expect("fetching the current heap pointer never fails"); + + // TODO: This will leak guest memory; find a better solution. + self.0.sbrk(size).ok_or_else(|| String::from("allocation failed"))?; + + Ok(Pointer::new(pointer)) + } + + fn deallocate_memory(&mut self, _ptr: Pointer) -> sp_wasm_interface::Result<()> { + // This is only used by the allocator host function, which is unused under PolkaVM. 
+ unimplemented!("'deallocate_memory' is never used when running under PolkaVM"); + } + + fn register_panic_error_message(&mut self, _message: &str) { + unimplemented!("'register_panic_error_message' is never used when running under PolkaVM"); + } +} + +fn call_host_function( + caller: &mut Caller<()>, + function: &dyn Function, +) -> Result<(), polkavm::Trap> { + let mut args = [Value::I64(0); Reg::ARG_REGS.len()]; + let mut nth_reg = 0; + for (nth_arg, kind) in function.signature().args.iter().enumerate() { + match kind { + ValueType::I32 => { + args[nth_arg] = Value::I32(caller.get_reg(Reg::ARG_REGS[nth_reg]) as i32); + nth_reg += 1; + }, + ValueType::F32 => { + args[nth_arg] = Value::F32(caller.get_reg(Reg::ARG_REGS[nth_reg])); + nth_reg += 1; + }, + ValueType::I64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = + Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); + }, + ValueType::F64 => { + let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); + }, + } + } + + log::trace!( + "Calling host function: '{}', args = {:?}", + function.name(), + &args[..function.signature().args.len()] + ); + + let value = match function + .execute(&mut Context(caller), &mut args.into_iter().take(function.signature().args.len())) + { + Ok(value) => value, + Err(error) => { + log::warn!("Call into the host function '{}' failed: {error}", function.name()); + return Err(polkavm::Trap::default()); + }, + }; + + if let Some(value) = value { + match value { + Value::I32(value) => { + caller.set_reg(Reg::A0, value as u32); + }, + Value::F32(value) => { + caller.set_reg(Reg::A0, value); + }, + Value::I64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); + }, + Value::F64(value) => { + caller.set_reg(Reg::A0, value as u32); + caller.set_reg(Reg::A1, (value >> 32) as u32); + }, + } + } + + Ok(()) +} + +pub fn create_runtime(blob: &polkavm::ProgramBlob) -> Result, WasmError> +where + H: HostFunctions, +{ + static ENGINE: std::sync::OnceLock> = + std::sync::OnceLock::new(); + + let engine = ENGINE.get_or_init(|| { + let config = polkavm::Config::from_env()?; + polkavm::Engine::new(&config) + }); + + let engine = match engine { + Ok(ref engine) => engine, + Err(ref error) => { + return Err(WasmError::Other(error.to_string())); + }, + }; + + let module = polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob)?; + let mut linker = polkavm::Linker::new(&engine); + for function in H::host_functions() { + linker.func_new(function.name(), |mut caller| call_host_function(&mut caller, function))?; + } + + let instance_pre = linker.instantiate_pre(&module)?; + Ok(Box::new(InstancePre(instance_pre))) +} diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 501279a312c..be8344ba79b 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -297,6 +297,10 @@ pub fn create_wasm_runtime_with_code( where H: HostFunctions, { + if let Some(blob) = blob.as_polkavm_blob() { + return sc_executor_polkavm::create_runtime::(blob); + } + match wasm_method { WasmExecutionMethod::Compiled { instantiation_strategy } => 
sc_executor_wasmtime::create_runtime::( diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index 8852532adbc..4f67d1df9c5 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -22,36 +22,12 @@ use std::sync::Arc; use crate::runtime::{InstanceCounter, ReleaseInstanceHandle, Store, StoreData}; -use sc_executor_common::{ - error::{Backtrace, Error, MessageWithBacktrace, Result, WasmError}, - wasm_runtime::InvokeMethod, -}; -use sp_wasm_interface::{Pointer, Value, WordSize}; -use wasmtime::{ - AsContext, AsContextMut, Engine, Extern, Instance, InstancePre, Memory, Table, Val, -}; - -/// Invoked entrypoint format. -pub enum EntryPointType { - /// Direct call. - /// - /// Call is made by providing only payload reference and length. - Direct { entrypoint: wasmtime::TypedFunc<(u32, u32), u64> }, - /// Indirect call. - /// - /// Call is made by providing payload reference and length, and extra argument - /// for advanced routing. - Wrapped { - /// The extra argument passed to the runtime. It is typically a wasm function pointer. - func: u32, - dispatcher: wasmtime::TypedFunc<(u32, u32, u32), u64>, - }, -} +use sc_executor_common::error::{Backtrace, Error, MessageWithBacktrace, Result, WasmError}; +use sp_wasm_interface::{Pointer, WordSize}; +use wasmtime::{AsContext, AsContextMut, Engine, Instance, InstancePre, Memory}; /// Wasm blob entry point. -pub struct EntryPoint { - call_type: EntryPointType, -} +pub struct EntryPoint(wasmtime::TypedFunc<(u32, u32), u64>); impl EntryPoint { /// Call this entry point. @@ -64,13 +40,7 @@ impl EntryPoint { let data_ptr = u32::from(data_ptr); let data_len = u32::from(data_len); - match self.call_type { - EntryPointType::Direct { ref entrypoint } => - entrypoint.call(&mut *store, (data_ptr, data_len)), - EntryPointType::Wrapped { func, ref dispatcher } => - dispatcher.call(&mut *store, (func, data_ptr, data_len)), - } - .map_err(|trap| { + self.0.call(&mut *store, (data_ptr, data_len)).map_err(|trap| { let host_state = store .data_mut() .host_state @@ -99,18 +69,7 @@ impl EntryPoint { let entrypoint = func .typed::<(u32, u32), u64>(ctx) .map_err(|_| "Invalid signature for direct entry point")?; - Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) - } - - pub fn wrapped( - dispatcher: wasmtime::Func, - func: u32, - ctx: impl AsContext, - ) -> std::result::Result { - let dispatcher = dispatcher - .typed::<(u32, u32, u32), u64>(ctx) - .map_err(|_| "Invalid signature for wrapped entry point")?; - Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) + Ok(Self(entrypoint)) } } @@ -178,10 +137,8 @@ impl InstanceWrapper { })?; let memory = get_linear_memory(&instance, &mut store)?; - let table = get_table(&instance, &mut store); store.data_mut().memory = Some(memory); - store.data_mut().table = table; Ok(InstanceWrapper { instance, store, _release_instance_handle }) } @@ -190,61 +147,17 @@ impl InstanceWrapper { /// /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return /// an error. - pub fn resolve_entrypoint(&mut self, method: InvokeMethod) -> Result { - Ok(match method { - InvokeMethod::Export(method) => { - // Resolve the requested method and verify that it has a proper signature. 
- let export = - self.instance.get_export(&mut self.store, method).ok_or_else(|| { - Error::from(format!("Exported method {} is not found", method)) - })?; - let func = export - .into_func() - .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; - EntryPoint::direct(func, &self.store).map_err(|_| { - Error::from(format!("Exported function '{}' has invalid signature.", method)) - })? - }, - InvokeMethod::Table(func_ref) => { - let table = self - .instance - .get_table(&mut self.store, "__indirect_function_table") - .ok_or(Error::NoTable)?; - let val = table - .get(&mut self.store, func_ref) - .ok_or(Error::NoTableEntryWithIndex(func_ref))?; - let func = val - .funcref() - .ok_or(Error::TableElementIsNotAFunction(func_ref))? - .ok_or(Error::FunctionRefIsNull(func_ref))?; - - EntryPoint::direct(*func, &self.store).map_err(|_| { - Error::from(format!( - "Function @{} in exported table has invalid signature for direct call.", - func_ref, - )) - })? - }, - InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = self - .instance - .get_table(&mut self.store, "__indirect_function_table") - .ok_or(Error::NoTable)?; - let val = table - .get(&mut self.store, dispatcher_ref) - .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; - let dispatcher = val - .funcref() - .ok_or(Error::TableElementIsNotAFunction(dispatcher_ref))? - .ok_or(Error::FunctionRefIsNull(dispatcher_ref))?; - - EntryPoint::wrapped(*dispatcher, func, &self.store).map_err(|_| { - Error::from(format!( - "Function @{} in exported table has invalid signature for wrapped call.", - dispatcher_ref, - )) - })? - }, + pub fn resolve_entrypoint(&mut self, method: &str) -> Result { + // Resolve the requested method and verify that it has a proper signature. + let export = self + .instance + .get_export(&mut self.store, method) + .ok_or_else(|| Error::from(format!("Exported method {} is not found", method)))?; + let func = export + .into_func() + .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; + EntryPoint::direct(func, &self.store).map_err(|_| { + Error::from(format!("Exported function '{}' has invalid signature.", method)) }) } @@ -268,24 +181,6 @@ impl InstanceWrapper { Ok(heap_base as u32) } - - /// Get the value from a global with the given `name`. - pub fn get_global_val(&mut self, name: &str) -> Result> { - let global = match self.instance.get_export(&mut self.store, name) { - Some(global) => global, - None => return Ok(None), - }; - - let global = global.into_global().ok_or_else(|| format!("`{}` is not a global", name))?; - - match global.get(&mut self.store) { - Val::I32(val) => Ok(Some(Value::I32(val))), - Val::I64(val) => Ok(Some(Value::I64(val))), - Val::F32(val) => Ok(Some(Value::F32(val))), - Val::F64(val) => Ok(Some(Value::F64(val))), - _ => Err("Unknown value type".into()), - } - } } /// Extract linear memory instance from the given instance. @@ -301,15 +196,6 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result Option
{ - instance - .get_export(ctx, "__indirect_function_table") - .as_ref() - .cloned() - .and_then(Extern::into_table) -} - /// Functions related to memory. impl InstanceWrapper { pub(crate) fn store(&self) -> &Store { diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index ac88663f4e7..595cc503272 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -30,10 +30,10 @@ use sc_executor_common::{ error::{Error, Result, WasmError}, runtime_blob::RuntimeBlob, util::checked_range, - wasm_runtime::{HeapAllocStrategy, InvokeMethod, WasmInstance, WasmModule}, + wasm_runtime::{HeapAllocStrategy, WasmInstance, WasmModule}, }; use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{HostFunctions, Pointer, Value, WordSize}; +use sp_wasm_interface::{HostFunctions, Pointer, WordSize}; use std::{ path::{Path, PathBuf}, sync::{ @@ -41,7 +41,7 @@ use std::{ Arc, }, }; -use wasmtime::{AsContext, Engine, Memory, Table}; +use wasmtime::{AsContext, Engine, Memory}; const MAX_INSTANCE_COUNT: u32 = 64; @@ -51,8 +51,6 @@ pub(crate) struct StoreData { pub(crate) host_state: Option, /// This will be always set once the store is initialized. pub(crate) memory: Option, - /// This will be set only if the runtime actually contains a table. - pub(crate) table: Option
, } impl StoreData { @@ -164,7 +162,7 @@ pub struct WasmtimeInstance { impl WasmtimeInstance { fn call_impl( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], allocation_stats: &mut Option, ) -> Result> { @@ -184,20 +182,13 @@ impl WasmtimeInstance { impl WasmInstance for WasmtimeInstance { fn call_with_allocation_stats( &mut self, - method: InvokeMethod, + method: &str, data: &[u8], ) -> (Result>, Option) { let mut allocation_stats = None; let result = self.call_impl(method, data, &mut allocation_stats); (result, allocation_stats) } - - fn get_global_const(&mut self, name: &str) -> Result> { - match &mut self.strategy { - Strategy::RecreateInstance(ref mut instance_creator) => - instance_creator.instantiate()?.get_global_val(name), - } - } } /// Prepare a directory structure and a config file to enable wasmtime caching. diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index c78def9bf44..e47775d56d9 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -38,6 +38,9 @@ tracing-core = { version = "0.1.32", default-features = false } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. ed25519-dalek = { version = "2.1", default-features = false, optional = true } +[target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] +polkavm-derive = { workspace = true } + [build-dependencies] rustversion = "1.0.6" diff --git a/substrate/primitives/io/src/global_alloc_riscv.rs b/substrate/primitives/io/src/global_alloc_riscv.rs new file mode 100644 index 00000000000..8d7c69c2094 --- /dev/null +++ b/substrate/primitives/io/src/global_alloc_riscv.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[global_allocator] +static ALLOCATOR: polkavm_derive::LeakingAllocator = polkavm_derive::LeakingAllocator; diff --git a/substrate/primitives/io/src/global_alloc_wasm.rs b/substrate/primitives/io/src/global_alloc_wasm.rs new file mode 100644 index 00000000000..cf19a6e21a2 --- /dev/null +++ b/substrate/primitives/io/src/global_alloc_wasm.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use core::alloc::{GlobalAlloc, Layout}; + +/// Allocator used by Substrate from within the runtime. +struct RuntimeAllocator; + +#[global_allocator] +static ALLOCATOR: RuntimeAllocator = RuntimeAllocator; + +unsafe impl GlobalAlloc for RuntimeAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + crate::allocator::malloc(layout.size() as u32) + } + + unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) { + crate::allocator::free(ptr) + } +} diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 6d34199416a..684854ea5c8 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -129,6 +129,16 @@ use sp_externalities::{Externalities, ExternalitiesExt}; pub use sp_externalities::MultiRemovalResults; +#[cfg(all(not(feature = "disable_allocator"), substrate_runtime, target_family = "wasm"))] +mod global_alloc_wasm; + +#[cfg(all( + not(feature = "disable_allocator"), + substrate_runtime, + any(target_arch = "riscv32", target_arch = "riscv64") +))] +mod global_alloc_riscv; + #[cfg(feature = "std")] const LOG_TARGET: &str = "runtime::io"; @@ -1737,30 +1747,6 @@ mod tracing_setup { pub use tracing_setup::init_tracing; -/// Allocator used by Substrate from within the runtime. -#[cfg(substrate_runtime)] -struct RuntimeAllocator; - -#[cfg(all(not(feature = "disable_allocator"), substrate_runtime))] -#[global_allocator] -static ALLOCATOR: RuntimeAllocator = RuntimeAllocator; - -#[cfg(substrate_runtime)] -mod allocator_impl { - use super::*; - use core::alloc::{GlobalAlloc, Layout}; - - unsafe impl GlobalAlloc for RuntimeAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - allocator::malloc(layout.size() as u32) - } - - unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) { - allocator::free(ptr) - } - } -} - /// Crashes the execution of the program. /// /// Equivalent to the WASM `unreachable` instruction, RISC-V `unimp` instruction, diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 9ffb5c72fd9..13db9fce431 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -855,7 +855,7 @@ fn build_bloaty_blob( println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version()); // Use `process::exit(1)` to have a clean error output. - if !matches!(build_cmd.status().map(|s| s.success()), Ok(true)) { + if !build_cmd.status().map_or(false, |s| s.success()) { process::exit(1); } -- GitLab From 1ead59773e2dab336d2b54295419bbc3fe7c687f Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Tue, 12 Mar 2024 10:33:54 +0200 Subject: [PATCH 84/86] Add api-name in `cannot query the runtime API version` warning (#3653) Sometimes we see nodes printing this warning: ``` cannot query the runtime API version: Api called for an unknown Block: State already discarded for ``` The log is harmless, but let's print the api we got this for, so that we can track its call site and truly confirm it is harmless or fix it. 
Signed-off-by: Alexandru Gheorghe --- polkadot/node/core/runtime-api/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 4bedfd82734..3ff1a35d068 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -433,6 +433,7 @@ where .unwrap_or_else(|e| { gum::warn!( target: LOG_TARGET, + api = ?stringify!($api_name), "cannot query the runtime API version: {}", e, ); -- GitLab From a756baf3b211efc57ca4a0089b7ceba4c20b9f12 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:43:31 +0100 Subject: [PATCH 85/86] Support for `keyring` in runtimes (#2044) This functionality is required for #1984. This PR enables [`sp-keyring`](https://github.com/paritytech/polkadot-sdk/blob/21d36b7b4229c4d5225944f197918cde23fda4ea/substrate/primitives/keyring/src/sr25519.rs#L31-L40) in `no-std` environments, allowing to generate the public key (e.g. `AccountKeyring::Alice.public().to_ss58check()`), which can be later used in the any of built-in [_runtime-genesis-config_ variant](https://github.com/paritytech/polkadot-sdk/blob/21d36b7b4229c4d5225944f197918cde23fda4ea/polkadot/node/service/src/chain_spec.rs#L1066-L1073). The proposal is as follows: - expose [`core::Pair` trait](https://github.com/paritytech/polkadot-sdk/blob/d6f15306282e3de848a09c9aa9cba6f95a7811f0/substrate/primitives/core/src/crypto.rs#L832) in `no-std`, - `full_crypto` feature enables `sign` method, - `std` feature enables `generate_with_phrase` and `generate` methods (randomness is required), - All other functionality, currently gated by `full_crypto` will be available unconditionally (`no-std`): -- `from_string` -- `from_string_with_seed` -- `from seed` -- `from_seed_slice` -- `from_phrase` -- `derive` -- `verify` --- Depends on https://github.com/rust-bitcoin/rust-bip39/pull/57 --------- Co-authored-by: command-bot <> Co-authored-by: Davide Galassi --- .gitlab/pipeline/check.yml | 16 +++++ Cargo.lock | 43 ++++++++++++-- substrate/client/cli/Cargo.toml | 3 +- substrate/client/cli/src/commands/generate.rs | 2 +- .../check-features-variants.sh | 12 ++++ .../application-crypto/src/bls377.rs | 5 +- .../application-crypto/src/ecdsa.rs | 4 +- .../application-crypto/src/ecdsa_bls377.rs | 1 + .../application-crypto/src/ed25519.rs | 4 +- .../primitives/application-crypto/src/lib.rs | 59 +++++++++++++------ .../application-crypto/src/sr25519.rs | 4 +- .../application-crypto/src/traits.rs | 14 +---- substrate/primitives/core/Cargo.toml | 24 +++----- .../core/check-features-variants.sh | 13 ++++ substrate/primitives/core/src/address_uri.rs | 2 +- substrate/primitives/core/src/bandersnatch.rs | 28 ++++----- substrate/primitives/core/src/bls.rs | 31 +++------- substrate/primitives/core/src/crypto.rs | 24 +------- substrate/primitives/core/src/ecdsa.rs | 34 ++++------- substrate/primitives/core/src/ed25519.rs | 21 ++----- substrate/primitives/core/src/lib.rs | 3 - .../primitives/core/src/paired_crypto.rs | 27 +++------ substrate/primitives/core/src/sr25519.rs | 30 +++------- substrate/primitives/keyring/Cargo.toml | 7 ++- .../keyring/check-features-variants.sh | 8 +++ .../primitives/keyring/src/bandersnatch.rs | 20 +++++-- substrate/primitives/keyring/src/ed25519.rs | 12 +++- substrate/primitives/keyring/src/lib.rs | 2 + substrate/primitives/keyring/src/sr25519.rs | 20 +++++-- substrate/test-utils/runtime/Cargo.toml | 6 +- 30 files changed, 247 
insertions(+), 232 deletions(-) create mode 100755 substrate/primitives/application-crypto/check-features-variants.sh create mode 100755 substrate/primitives/core/check-features-variants.sh create mode 100755 substrate/primitives/keyring/check-features-variants.sh diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 4d71a473372..52da3355050 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -259,3 +259,19 @@ find-fail-ci-phrase: echo "No $ASSERT_REGEX was found, exiting with 0"; exit 0; fi + +check-core-crypto-features: + stage: check + extends: + - .docker-env + - .common-refs + script: + - pushd substrate/primitives/core + - ./check-features-variants.sh + - popd + - pushd substrate/primitives/application-crypto + - ./check-features-variants.sh + - popd + - pushd substrate/primitives/keyring + - ./check-features-variants.sh + - popd diff --git a/Cargo.lock b/Cargo.lock index 5eab3cf7c24..37c5a5177c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1434,9 +1434,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes", - "rand", - "rand_core 0.6.4", + "bitcoin_hashes 0.11.0", "serde", "unicode-normalization", ] @@ -1456,12 +1454,28 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +[[package]] +name = "bitcoin_hashes" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -6348,6 +6362,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" + [[package]] name = "hex-literal" version = "0.4.1" @@ -11411,6 +11431,19 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "parity-bip39" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" +dependencies = [ + "bitcoin_hashes 0.13.0", + "rand", + "rand_core 0.6.4", + "serde", + "unicode-normalization", +] + [[package]] name = "parity-bytes" version = "0.1.2" @@ -15662,7 +15695,6 @@ name = "sc-cli" version = "0.36.0" dependencies = [ "array-bytes 6.1.0", - "bip39", "chrono", "clap 4.5.1", "fdlimit", @@ -15672,6 +15704,7 @@ dependencies = [ "libp2p-identity", "log", "names", + "parity-bip39", "parity-scale-codec", "rand", "regex", @@ -18611,7 +18644,6 @@ version = "28.0.0" dependencies = [ "array-bytes 6.1.0", "bandersnatch_vrfs", - "bip39", "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", 
@@ -18629,6 +18661,7 @@ dependencies = [ "libsecp256k1", "log", "merlin", + "parity-bip39", "parity-scale-codec", "parking_lot 0.12.1", "paste", diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 0582018283c..c70a0893c72 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -32,7 +32,8 @@ rpassword = "7.0.0" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } -bip39 = "2.0.0" +# personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 +bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] } tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] } sc-client-api = { path = "../api" } sc-client-db = { path = "../db", default-features = false } diff --git a/substrate/client/cli/src/commands/generate.rs b/substrate/client/cli/src/commands/generate.rs index c465bcc85a4..94769279e21 100644 --- a/substrate/client/cli/src/commands/generate.rs +++ b/substrate/client/cli/src/commands/generate.rs @@ -64,7 +64,7 @@ impl GenerateCmd { let password = self.keystore_params.read_password()?; let output = self.output_scheme.output_type; - let phrase = mnemonic.word_iter().join(" "); + let phrase = mnemonic.words().join(" "); with_crypto_scheme!( self.crypto_scheme.scheme, diff --git a/substrate/primitives/application-crypto/check-features-variants.sh b/substrate/primitives/application-crypto/check-features-variants.sh new file mode 100755 index 00000000000..dd45a212bae --- /dev/null +++ b/substrate/primitives/application-crypto/check-features-variants.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env -S bash -eux + +export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings" +T=wasm32-unknown-unknown +cargo check --release +cargo check --release --target=$T --no-default-features +cargo check --release --target=$T --no-default-features --features="full_crypto" +cargo check --release --target=$T --no-default-features --features="serde" +cargo check --release --target=$T --no-default-features --features="serde,full_crypto" +cargo check --release --target=$T --no-default-features --features="bandersnatch-experimental" +cargo check --release --target=$T --no-default-features --features="bls-experimental" +cargo check --release --target=$T --no-default-features --features="bls-experimental,full_crypto" diff --git a/substrate/primitives/application-crypto/src/bls377.rs b/substrate/primitives/application-crypto/src/bls377.rs index ee17060564f..3bd01de139c 100644 --- a/substrate/primitives/application-crypto/src/bls377.rs +++ b/substrate/primitives/application-crypto/src/bls377.rs @@ -19,14 +19,13 @@ use crate::{KeyTypeId, RuntimePublic}; pub use sp_core::bls::bls377::*; +use sp_std::vec::Vec; mod app { crate::app_crypto!(super, sp_core::testing::BLS377); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/ecdsa.rs b/substrate/primitives/application-crypto/src/ecdsa.rs index 27ffe12579f..439b51dc604 100644 --- a/substrate/primitives/application-crypto/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/src/ecdsa.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::ECDSA); } -#[cfg(feature = 
"full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs index 70940587ced..8dee73095fb 100644 --- a/substrate/primitives/application-crypto/src/ecdsa_bls377.rs +++ b/substrate/primitives/application-crypto/src/ecdsa_bls377.rs @@ -18,6 +18,7 @@ //! ECDSA and BLS12-377 paired crypto applications. use crate::{KeyTypeId, RuntimePublic}; +use sp_std::vec::Vec; pub use sp_core::paired_crypto::ecdsa_bls377::*; diff --git a/substrate/primitives/application-crypto/src/ed25519.rs b/substrate/primitives/application-crypto/src/ed25519.rs index bc05018370e..addefe7daf6 100644 --- a/substrate/primitives/application-crypto/src/ed25519.rs +++ b/substrate/primitives/application-crypto/src/ed25519.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::ED25519); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 686b486f335..ea2e5a83127 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -20,12 +20,9 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; +pub use sp_core::crypto::{key_types, CryptoTypeId, DeriveJunction, KeyTypeId, Ss58Codec}; #[doc(hidden)] -#[cfg(feature = "full_crypto")] pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; -#[cfg(any(feature = "full_crypto", feature = "serde"))] -pub use sp_core::crypto::{DeriveJunction, Ss58Codec}; #[doc(hidden)] pub use sp_core::{ self, @@ -85,7 +82,7 @@ macro_rules! app_crypto { $module::CRYPTO_ID ); $crate::app_crypto_signature_common!($module::Signature, $key_type); - $crate::app_crypto_pair!($module::Pair, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_pair_common!($module::Pair, $key_type, $module::CRYPTO_ID); }; } @@ -116,13 +113,15 @@ macro_rules! app_crypto { $module::CRYPTO_ID ); $crate::app_crypto_signature_common!($module::Signature, $key_type); + $crate::app_crypto_pair_common!($module::Pair, $key_type, $module::CRYPTO_ID); }; } /// Declares `Pair` type which is functionally equivalent to `$pair`, but is /// new application-specific type whose identifier is `$key_type`. +/// It is a common part shared between full_crypto and non full_crypto environments. #[macro_export] -macro_rules! app_crypto_pair { +macro_rules! app_crypto_pair_common { ($pair:ty, $key_type:expr, $crypto_type:expr) => { $crate::wrap! { /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. @@ -140,7 +139,14 @@ macro_rules! 
app_crypto_pair { type Signature = Signature; $crate::app_crypto_pair_functions_if_std!($pair); + $crate::app_crypto_pair_functions_if_full_crypto!($pair); + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { + <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) + } fn derive>( &self, path: Iter, @@ -154,9 +160,6 @@ macro_rules! app_crypto_pair { fn from_seed_slice(seed: &[u8]) -> Result { <$pair>::from_seed_slice(seed).map(Self) } - fn sign(&self, msg: &[u8]) -> Self::Signature { - Signature(self.0.sign(msg)) - } fn verify>( sig: &Self::Signature, message: M, @@ -203,13 +206,6 @@ macro_rules! app_crypto_pair_functions_if_std { let r = <$pair>::generate_with_phrase(password); (Self(r.0), r.1, r.2) } - - fn from_phrase( - phrase: &str, - password: Option<&str>, - ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { - <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) - } }; } @@ -220,6 +216,25 @@ macro_rules! app_crypto_pair_functions_if_std { ($pair:ty) => {}; } +/// Implements functions for the `Pair` trait when `feature = "full_crypto"` is enabled. +#[doc(hidden)] +#[cfg(feature = "full_crypto")] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_full_crypto { + ($pair:ty) => { + fn sign(&self, msg: &[u8]) -> Self::Signature { + Signature(self.0.sign(msg)) + } + }; +} + +#[doc(hidden)] +#[cfg(not(feature = "full_crypto"))] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_full_crypto { + ($pair:ty) => {}; +} + /// Declares `Public` type which is functionally equivalent to `$public` but is /// new application-specific type whose identifier is `$key_type`. /// For full functionality, `app_crypto_public_common!` must be called too. @@ -267,7 +282,7 @@ macro_rules! app_crypto_public_not_full_crypto { $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( - Clone, Eq, PartialEq, Ord, PartialOrd, + Clone, Eq, Hash, PartialEq, Ord, PartialOrd, $crate::codec::Encode, $crate::codec::Decode, $crate::RuntimeDebug, @@ -277,10 +292,13 @@ macro_rules! app_crypto_public_not_full_crypto { pub struct Public($public); } - impl $crate::CryptoType for Public {} + impl $crate::CryptoType for Public { + type Pair = Pair; + } impl $crate::AppCrypto for Public { type Public = Public; + type Pair = Pair; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; @@ -452,10 +470,13 @@ macro_rules! 
app_crypto_signature_not_full_crypto { pub struct Signature($sig); } - impl $crate::CryptoType for Signature {} + impl $crate::CryptoType for Signature { + type Pair = Pair; + } impl $crate::AppCrypto for Signature { type Public = Public; + type Pair = Pair; type Signature = Signature; const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; diff --git a/substrate/primitives/application-crypto/src/sr25519.rs b/substrate/primitives/application-crypto/src/sr25519.rs index 7c91bfa7bb5..d411cc253c0 100644 --- a/substrate/primitives/application-crypto/src/sr25519.rs +++ b/substrate/primitives/application-crypto/src/sr25519.rs @@ -27,9 +27,7 @@ mod app { crate::app_crypto!(super, sp_core::testing::SR25519); } -#[cfg(feature = "full_crypto")] -pub use app::Pair as AppPair; -pub use app::{Public as AppPublic, Signature as AppSignature}; +pub use app::{Pair as AppPair, Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/traits.rs b/substrate/primitives/application-crypto/src/traits.rs index e9b1080f63d..0b59abf272d 100644 --- a/substrate/primitives/application-crypto/src/traits.rs +++ b/substrate/primitives/application-crypto/src/traits.rs @@ -18,9 +18,7 @@ use codec::Codec; use scale_info::TypeInfo; -#[cfg(feature = "full_crypto")] -use sp_core::crypto::Pair; -use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; +use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Pair, Public}; use sp_std::{fmt::Debug, vec::Vec}; /// Application-specific cryptographic object. @@ -45,24 +43,14 @@ pub trait AppCrypto: 'static + Sized + CryptoType { type Signature: AppSignature; /// The corresponding key pair type in this application scheme. - #[cfg(feature = "full_crypto")] type Pair: AppPair; } /// Type which implements Hash in std, not when no-std (std variant). -#[cfg(any(feature = "std", feature = "full_crypto"))] pub trait MaybeHash: sp_std::hash::Hash {} -#[cfg(any(feature = "std", feature = "full_crypto"))] impl MaybeHash for T {} -/// Type which implements Hash in std, not when no-std (no-std variant). -#[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] -pub trait MaybeHash {} -#[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] -impl MaybeHash for T {} - /// Application-specific key pair. 
-#[cfg(feature = "full_crypto")] pub trait AppPair: AppCrypto + Pair::Public, Signature = ::Signature> { diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index f7cc2b4fcf7..908f2498de5 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -27,10 +27,11 @@ hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } -substrate-bip39 = { path = "../../utils/substrate-bip39", optional = true } -bip39 = { version = "2.0.0", default-features = false } +substrate-bip39 = { path = "../../utils/substrate-bip39", default-features = false } +# personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 +bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = ["alloc"] } zeroize = { version = "1.4.3", default-features = false } -secrecy = { version = "0.8.0", default-features = false } +secrecy = { version = "0.8.0", default-features = false, features = ["alloc"] } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false } @@ -46,13 +47,13 @@ paste = "1.0.7" itertools = { version = "0.10.3", optional = true } # full crypto -array-bytes = { version = "6.1", optional = true } -ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } +array-bytes = { version = "6.1" } +ed25519-zebra = { version = "3.1.0", default-features = false } blake2 = { version = "0.10.4", default-features = false, optional = true } -libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } +libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"] } schnorrkel = { version = "0.11.4", features = ["preaudit_deprecated"], default-features = false } merlin = { version = "3.0", default-features = false } -sp-crypto-hashing = { path = "../crypto/hashing", default-features = false, optional = true } +sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-runtime-interface = { path = "../runtime-interface", default-features = false } # k256 crate, better portability, intended to be used in substrate-runtimes (no-std) k256 = { version = "0.13.3", features = ["alloc", "ecdsa"], default-features = false } @@ -81,7 +82,6 @@ bench = false default = ["std"] std = [ - "array-bytes", "bandersnatch_vrfs?/std", "bip39/rand", "bip39/std", @@ -112,7 +112,6 @@ std = [ "schnorrkel/std", "secp256k1/global-context", "secp256k1/std", - "secrecy/alloc", "serde/std", "sp-crypto-hashing/std", "sp-debug-derive/std", @@ -131,7 +130,6 @@ std = [ # Serde support without relying on std features. serde = [ - "array-bytes", "blake2", "bounded-collections/serde", "bs58/alloc", @@ -140,8 +138,6 @@ serde = [ "k256/serde", "primitive-types/serde_no_std", "scale-info/serde", - "secrecy/alloc", - "sp-crypto-hashing", "sp-storage/serde", ] @@ -149,11 +145,7 @@ serde = [ # or Intel SGX. # For the regular wasm runtime builds this should not be used. 
full_crypto = [ - "array-bytes", "blake2", - "ed25519-zebra", - "libsecp256k1", - "sp-crypto-hashing", "sp-runtime-interface/disable_target_static_assertions", ] diff --git a/substrate/primitives/core/check-features-variants.sh b/substrate/primitives/core/check-features-variants.sh new file mode 100755 index 00000000000..6d28212065a --- /dev/null +++ b/substrate/primitives/core/check-features-variants.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env -S bash -eux + +export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings" +T=wasm32-unknown-unknown + +cargo check --target=$T --release --no-default-features --features="bls-experimental" +cargo check --target=$T --release --no-default-features --features="full_crypto,bls-experimental" +cargo check --target=$T --release --no-default-features --features="bandersnatch-experimental" +cargo check --target=$T --release --no-default-features --features="full_crypto,serde,bandersnatch-experimental" +cargo check --target=$T --release --no-default-features --features="full_crypto,serde" +cargo check --target=$T --release --no-default-features --features="full_crypto" +cargo check --target=$T --release --no-default-features --features="serde" +cargo check --target=$T --release --no-default-features diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index 211d47c0093..2e32d0cd86d 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -17,7 +17,7 @@ //! Little util for parsing an address URI. Replaces regular expressions. -#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))] +#[cfg(not(feature = "std"))] use sp_std::{ alloc::string::{String, ToString}, vec::Vec, diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 61e7162544a..5cae6047f16 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -22,19 +22,18 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; +#[cfg(feature = "full_crypto")] +use crate::crypto::VrfSecret; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic, + ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, + Public as TraitPublic, SecretStringError, UncheckedFrom, VrfPublic, }; -#[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(all(not(feature = "std"), feature = "serde"))] use sp_std::alloc::{format, string::String}; -use bandersnatch_vrfs::CanonicalSerialize; -#[cfg(feature = "full_crypto")] -use bandersnatch_vrfs::SecretKey; +use bandersnatch_vrfs::{CanonicalSerialize, SecretKey}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; @@ -45,10 +44,8 @@ use sp_std::{vec, vec::Vec}; pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); /// Context used to produce a plain signature without any VRF input/output. -#[cfg(feature = "full_crypto")] pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext"; -#[cfg(feature = "full_crypto")] const SEED_SERIALIZED_SIZE: usize = 32; const PUBLIC_SERIALIZED_SIZE: usize = 33; @@ -56,7 +53,6 @@ const SIGNATURE_SERIALIZED_SIZE: usize = 65; const PREOUT_SERIALIZED_SIZE: usize = 33; /// Bandersnatch public key. 
-#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( Clone, Copy, @@ -69,6 +65,7 @@ const PREOUT_SERIALIZED_SIZE: usize = 33; PassByInner, MaxEncodedLen, TypeInfo, + Hash, )] pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]); @@ -116,7 +113,6 @@ impl ByteArray for Public { impl TraitPublic for Public {} impl CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } @@ -154,8 +150,9 @@ impl<'de> Deserialize<'de> for Public { /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript /// `label`. -#[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] +#[derive( + Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo, Hash, +)] pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]); impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { @@ -194,7 +191,6 @@ impl ByteArray for Signature { } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } @@ -211,18 +207,15 @@ impl sp_std::fmt::Debug for Signature { } /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. -#[cfg(feature = "full_crypto")] type Seed = [u8; SEED_SERIALIZED_SIZE]; /// Bandersnatch secret key. -#[cfg(feature = "full_crypto")] #[derive(Clone)] pub struct Pair { secret: SecretKey, seed: Seed, } -#[cfg(feature = "full_crypto")] impl Pair { /// Get the key seed. pub fn seed(&self) -> Seed { @@ -230,7 +223,6 @@ impl Pair { } } -#[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Seed = Seed; type Public = Public; @@ -287,6 +279,7 @@ impl TraitPair for Pair { /// the constant label [`SIGNING_CTX`] and `data` without any additional data. /// /// See [`vrf::VrfSignData`] for additional details. 
+ #[cfg(feature = "full_crypto")] fn sign(&self, data: &[u8]) -> Signature { let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data], None); self.vrf_sign(&data).signature @@ -305,7 +298,6 @@ impl TraitPair for Pair { } } -#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index 0c84d0ba8e6..2256e4cd823 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -25,11 +25,11 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{ByteArray, CryptoType, Derive, Public as TraitPublic, UncheckedFrom}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; +use crate::crypto::{ + ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as TraitPair, + Public as TraitPublic, SecretStringError, UncheckedFrom, +}; -#[cfg(feature = "full_crypto")] use sp_std::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -40,9 +40,10 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(all(not(feature = "std"), feature = "serde"))] use sp_std::alloc::{format, string::String}; -use w3f_bls::{DoublePublicKey, DoubleSignature, EngineBLS, SerializableToBytes, TinyBLS381}; -#[cfg(feature = "full_crypto")] -use w3f_bls::{DoublePublicKeyScheme, Keypair, Message, SecretKey}; +use w3f_bls::{ + DoublePublicKey, DoublePublicKeyScheme, DoubleSignature, EngineBLS, Keypair, Message, + SecretKey, SerializableToBytes, TinyBLS381, +}; use sp_runtime_interface::pass_by::{self, PassBy, PassByInner}; use sp_std::{convert::TryFrom, marker::PhantomData, ops::Deref}; @@ -57,7 +58,6 @@ pub mod bls377 { pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bls7"); /// BLS12-377 key pair. - #[cfg(feature = "full_crypto")] pub type Pair = super::Pair; /// BLS12-377 public key. pub type Public = super::Public; @@ -79,7 +79,6 @@ pub mod bls381 { pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"bls8"); /// BLS12-381 key pair. - #[cfg(feature = "full_crypto")] pub type Pair = super::Pair; /// BLS12-381 public key. pub type Public = super::Public; @@ -96,7 +95,6 @@ trait BlsBound: EngineBLS + HardJunctionId + Send + Sync + 'static {} impl BlsBound for T {} /// Secret key serialized size -#[cfg(feature = "full_crypto")] const SECRET_KEY_SERIALIZED_SIZE: usize = as SerializableToBytes>::SERIALIZED_BYTES_SIZE; @@ -113,7 +111,6 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize = /// It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we /// will need it later (such as for HDKD). -#[cfg(feature = "full_crypto")] type Seed = [u8; SECRET_KEY_SERIALIZED_SIZE]; /// A public key. 
@@ -150,7 +147,6 @@ impl Ord for Public { } } -#[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Public { fn hash(&self, state: &mut H) { self.inner.hash(state) @@ -226,7 +222,6 @@ impl From> for [u8; PUBLIC_KEY_SERIALIZED_SIZE] { } } -#[cfg(feature = "full_crypto")] impl From> for Public { fn from(x: Pair) -> Self { x.public() @@ -296,7 +291,6 @@ impl TraitPublic for Public {} impl Derive for Public {} impl CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } @@ -322,7 +316,6 @@ impl PartialEq for Signature { impl Eq for Signature {} -#[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Signature { fn hash(&self, state: &mut H) { self.inner.hash(state) @@ -412,15 +405,12 @@ impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } /// A key pair. -#[cfg(feature = "full_crypto")] pub struct Pair(Keypair); -#[cfg(feature = "full_crypto")] impl Clone for Pair { fn clone(&self) -> Self { Pair(self.0.clone()) @@ -432,15 +422,12 @@ trait HardJunctionId { } /// Derive a single hard junction. -#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { (T::ID, secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256) } -#[cfg(feature = "full_crypto")] impl Pair {} -#[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Seed = Seed; type Public = Public; @@ -480,6 +467,7 @@ impl TraitPair for Pair { Self::Public::unchecked_from(raw) } + #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Self::Signature { let mut mutable_self = self.clone(); let r: [u8; SIGNATURE_SERIALIZED_SIZE] = @@ -523,7 +511,6 @@ impl TraitPair for Pair { } } -#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index 2a8be2a2ba8..fab865d8c1f 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -18,7 +18,6 @@ //! Cryptographic utilities. use crate::{ed25519, sr25519}; -#[cfg(feature = "std")] use bip39::{Language, Mnemonic}; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] @@ -26,7 +25,6 @@ use itertools::Itertools; #[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; use scale_info::TypeInfo; -#[cfg(feature = "std")] pub use secrecy::{ExposeSecret, SecretString}; use sp_runtime_interface::pass_by::PassByInner; #[doc(hidden)] @@ -41,10 +39,7 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; -#[cfg(feature = "std")] -pub use crate::address_uri::AddressUri; -#[cfg(any(feature = "std", feature = "full_crypto"))] -pub use crate::address_uri::Error as AddressUriError; +pub use crate::address_uri::{AddressUri, Error as AddressUriError}; /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = @@ -82,7 +77,6 @@ impl> UncheckedInto for S { /// An error with the interpretation of a secret. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] -#[cfg(feature = "full_crypto")] pub enum SecretStringError { /// The overall format was invalid (e.g. the seed phrase contained symbols). 
#[cfg_attr(feature = "std", error("Invalid format {0}"))] @@ -104,7 +98,6 @@ pub enum SecretStringError { InvalidPath, } -#[cfg(any(feature = "std", feature = "full_crypto"))] impl From for SecretStringError { fn from(e: AddressUriError) -> Self { Self::InvalidFormat(e) @@ -114,7 +107,6 @@ impl From for SecretStringError { /// An error when deriving a key. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] -#[cfg(feature = "full_crypto")] pub enum DeriveError { /// A soft key was found in the path (and is unsupported). #[cfg_attr(feature = "std", error("Soft key in path"))] @@ -125,7 +117,6 @@ pub enum DeriveError { /// a new secret key from an existing secret key and, in the case of `SoftRaw` and `SoftIndex` /// a new public key from an existing public key. #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Encode, Decode)] -#[cfg(any(feature = "full_crypto", feature = "serde"))] pub enum DeriveJunction { /// Soft (vanilla) derivation. Public keys have a correspondent derivation. Soft([u8; JUNCTION_ID_LEN]), @@ -133,7 +124,6 @@ pub enum DeriveJunction { Hard([u8; JUNCTION_ID_LEN]), } -#[cfg(any(feature = "full_crypto", feature = "serde"))] impl DeriveJunction { /// Consume self to return a soft derive junction with the same chain code. pub fn soften(self) -> Self { @@ -192,7 +182,6 @@ impl DeriveJunction { } } -#[cfg(any(feature = "full_crypto", feature = "serde"))] impl> From for DeriveJunction { fn from(j: T) -> DeriveJunction { let j = j.as_ref(); @@ -812,7 +801,6 @@ mod dummy { /// assert_eq!("0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a", suri.phrase.expose_secret()); /// assert!(suri.password.is_none()); /// ``` -#[cfg(feature = "std")] pub struct SecretUri { /// The phrase to derive the private key. /// @@ -824,7 +812,6 @@ pub struct SecretUri { pub junctions: Vec, } -#[cfg(feature = "std")] impl sp_std::str::FromStr for SecretUri { type Err = SecretStringError; @@ -845,7 +832,6 @@ impl sp_std::str::FromStr for SecretUri { /// Trait suitable for typical cryptographic PKI key pair type. /// /// For now it just specifies how to create a key from a phrase and derivation path. -#[cfg(feature = "full_crypto")] pub trait Pair: CryptoType + Sized { /// The type which is used to encode a public key. type Public: Public + Hash; @@ -878,21 +864,19 @@ pub trait Pair: CryptoType + Sized { #[cfg(feature = "std")] fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { let mnemonic = Mnemonic::generate(12).expect("Mnemonic generation always works; qed"); - let phrase = mnemonic.word_iter().join(" "); + let phrase = mnemonic.words().join(" "); let (pair, seed) = Self::from_phrase(&phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); (pair, phrase.to_owned(), seed) } /// Returns the KeyPair from the English BIP39 seed `phrase`, or an error if it's invalid. - #[cfg(feature = "std")] fn from_phrase( phrase: &str, password: Option<&str>, ) -> Result<(Self, Self::Seed), SecretStringError> { let mnemonic = Mnemonic::parse_in(Language::English, phrase) .map_err(|_| SecretStringError::InvalidPhrase)?; - let (entropy, entropy_len) = mnemonic.to_entropy_array(); let big_seed = substrate_bip39::seed_from_entropy(&entropy[0..entropy_len], password.unwrap_or("")) @@ -928,6 +912,7 @@ pub trait Pair: CryptoType + Sized { fn from_seed_slice(seed: &[u8]) -> Result; /// Sign a message. 
+ #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Self::Signature; /// Verify a signature on a message. Returns true if the signature is good. @@ -962,7 +947,6 @@ pub trait Pair: CryptoType + Sized { /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will /// generally be equivalent to no password at all. - #[cfg(feature = "std")] fn from_string_with_seed( s: &str, password_override: Option<&str>, @@ -996,7 +980,6 @@ pub trait Pair: CryptoType + Sized { /// Interprets the string `s` in order to generate a key pair. /// /// See [`from_string_with_seed`](Pair::from_string_with_seed) for more extensive documentation. - #[cfg(feature = "std")] fn from_string(s: &str, password_override: Option<&str>) -> Result { Self::from_string_with_seed(s, password_override).map(|x| x.0) } @@ -1054,7 +1037,6 @@ where /// Type which has a particular kind of crypto associated with it. pub trait CryptoType { /// The pair key type of this crypto. - #[cfg(feature = "full_crypto")] type Pair: Pair; } diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index e6bd2c7eb57..fa071e1b03f 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -24,16 +24,13 @@ use sp_runtime_interface::pass_by::PassByInner; #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, + ByteArray, CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, Pair as TraitPair, + Public as TraitPublic, SecretStringError, UncheckedFrom, }; -#[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; -#[cfg(all(not(feature = "std"), feature = "full_crypto"))] -use k256::ecdsa::SigningKey as SecretKey; #[cfg(not(feature = "std"))] -use k256::ecdsa::VerifyingKey; -#[cfg(all(feature = "std", feature = "full_crypto"))] +use k256::ecdsa::{SigningKey as SecretKey, VerifyingKey}; +#[cfg(feature = "std")] use secp256k1::{ ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, @@ -42,7 +39,7 @@ use secp256k1::{ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(all(not(feature = "std"), feature = "serde"))] use sp_std::alloc::{format, string::String}; -#[cfg(feature = "full_crypto")] +#[cfg(not(feature = "std"))] use sp_std::vec::Vec; /// An identifier used to match public keys against ecdsa keys @@ -57,11 +54,9 @@ pub const SIGNATURE_SERIALIZED_SIZE: usize = 65; /// The secret seed. /// /// The raw secret seed, which can be used to create the `Pair`. -#[cfg(feature = "full_crypto")] type Seed = [u8; 32]; /// The ECDSA compressed public key. -#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( Clone, Copy, @@ -74,6 +69,7 @@ type Seed = [u8; 32]; PartialEq, PartialOrd, Ord, + Hash, )] pub struct Public(pub [u8; PUBLIC_KEY_SERIALIZED_SIZE]); @@ -221,8 +217,7 @@ impl<'de> Deserialize<'de> for Public { } /// A signature (a 512-bit value, plus 8 bits for recovery ID). 
-#[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] +#[derive(Hash, Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] pub struct Signature(pub [u8; SIGNATURE_SERIALIZED_SIZE]); impl ByteArray for Signature { @@ -345,13 +340,11 @@ impl Signature { } /// Recover the public key from this signature and a message. - #[cfg(feature = "full_crypto")] pub fn recover>(&self, message: M) -> Option { self.recover_prehashed(&sp_crypto_hashing::blake2_256(message.as_ref())) } /// Recover the public key from this signature and a pre-hashed message. - #[cfg(feature = "full_crypto")] pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { #[cfg(feature = "std")] { @@ -380,7 +373,7 @@ impl From<(k256::ecdsa::Signature, k256::ecdsa::RecoveryId)> for Signature { } } -#[cfg(all(feature = "std", feature = "full_crypto"))] +#[cfg(feature = "std")] impl From for Signature { fn from(recsig: RecoverableSignature) -> Signature { let mut r = Self::default(); @@ -393,20 +386,17 @@ impl From for Signature { } /// Derive a single hard junction. -#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { ("Secp256k1HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256) } /// A key pair. -#[cfg(feature = "full_crypto")] #[derive(Clone)] pub struct Pair { public: Public, secret: SecretKey, } -#[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Public = Public; type Seed = Seed; @@ -454,6 +444,7 @@ impl TraitPair for Pair { } /// Sign a message. + #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Signature { self.sign_prehashed(&sp_crypto_hashing::blake2_256(message)) } @@ -469,7 +460,6 @@ impl TraitPair for Pair { } } -#[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. pub fn seed(&self) -> Seed { @@ -496,6 +486,7 @@ impl Pair { } /// Sign a pre-hashed message + #[cfg(feature = "full_crypto")] pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { #[cfg(feature = "std")] { @@ -550,7 +541,7 @@ impl Pair { // NOTE: this solution is not effective when `Pair` is moved around memory. // The very same problem affects other cryptographic backends that are just using // `zeroize`for their secrets. -#[cfg(all(feature = "std", feature = "full_crypto"))] +#[cfg(feature = "std")] impl Drop for Pair { fn drop(&mut self) { self.secret.non_secure_erase() @@ -558,16 +549,13 @@ impl Drop for Pair { } impl CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } -#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 60ebd93e12d..9f42b36dc8b 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -19,7 +19,6 @@ //! Simple Ed25519 API. 
// end::description[] -#[cfg(feature = "full_crypto")] use sp_std::vec::Vec; use crate::{ @@ -32,13 +31,11 @@ use scale_info::TypeInfo; #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; use crate::crypto::{ - CryptoType, CryptoTypeId, Derive, FromEntropy, Public as TraitPublic, UncheckedFrom, + CryptoType, CryptoTypeId, Derive, DeriveError, DeriveJunction, FromEntropy, Pair as TraitPair, + Public as TraitPublic, SecretStringError, UncheckedFrom, }; #[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; -#[cfg(feature = "full_crypto")] use core::convert::TryFrom; -#[cfg(feature = "full_crypto")] use ed25519_zebra::{SigningKey, VerificationKey}; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; @@ -53,11 +50,9 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); /// A secret seed. It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we /// will need it later (such as for HDKD). -#[cfg(feature = "full_crypto")] type Seed = [u8; 32]; /// A public key. -#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( PartialEq, Eq, @@ -70,11 +65,11 @@ type Seed = [u8; 32]; PassByInner, MaxEncodedLen, TypeInfo, + Hash, )] pub struct Public(pub [u8; 32]); /// A key pair. -#[cfg(feature = "full_crypto")] #[derive(Copy, Clone)] pub struct Pair { public: VerificationKey, @@ -210,8 +205,7 @@ impl<'de> Deserialize<'de> for Public { } /// A signature (a 512-bit value). -#[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] +#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)] pub struct Signature(pub [u8; 64]); impl TryFrom<&[u8]> for Signature { @@ -370,12 +364,10 @@ impl TraitPublic for Public {} impl Derive for Public {} /// Derive a single hard junction. -#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { ("Ed25519HDKD", secret_seed, cc).using_encoded(sp_crypto_hashing::blake2_256) } -#[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Public = Public; type Seed = Seed; @@ -414,6 +406,7 @@ impl TraitPair for Pair { } /// Sign a message. + #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Signature { Signature::from_raw(self.secret.sign(message).into()) } @@ -433,7 +426,6 @@ impl TraitPair for Pair { } } -#[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. pub fn seed(&self) -> Seed { @@ -454,16 +446,13 @@ impl Pair { } impl CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } -#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 0d43eea9962..c0b41234460 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -46,7 +46,6 @@ pub use sp_debug_derive::RuntimeDebug; #[cfg(feature = "serde")] pub use impl_serde::serialize as bytes; -#[cfg(feature = "full_crypto")] #[deprecated( since = "27.0.0", note = "`sp-crypto-hashing` re-exports will be removed after June 2024. Use `sp-crypto-hashing` instead." 
@@ -58,7 +57,6 @@ pub mod crypto; pub mod hexdisplay; pub use paste; -#[cfg(any(feature = "full_crypto", feature = "std"))] mod address_uri; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; @@ -87,7 +85,6 @@ pub use self::{ hash::{convert_hash, H160, H256, H512}, uint::{U256, U512}, }; -#[cfg(feature = "full_crypto")] pub use crypto::{ByteArray, DeriveJunction, Pair, Public}; #[cfg(feature = "std")] diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index 20b32c339bd..6d2e6ddf334 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -19,11 +19,11 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{ByteArray, CryptoType, Derive, Public as PublicT, UncheckedFrom}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, DeriveJunction, Pair as PairT, SecretStringError}; +use crate::crypto::{ + ByteArray, CryptoType, Derive, DeriveError, DeriveJunction, Pair as PairT, Public as PublicT, + SecretStringError, UncheckedFrom, +}; -#[cfg(feature = "full_crypto")] use sp_std::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -39,12 +39,11 @@ use sp_std::convert::TryFrom; /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { + use crate::{bls377, crypto::CryptoTypeId, ecdsa}; #[cfg(feature = "full_crypto")] - use crate::Hasher; use crate::{ - bls377, - crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom}, - ecdsa, + crypto::{Pair as PairT, UncheckedFrom}, + Hasher, }; /// An identifier used to match public keys against BLS12-377 keys @@ -56,7 +55,6 @@ pub mod ecdsa_bls377 { ecdsa::SIGNATURE_SERIALIZED_SIZE + bls377::SIGNATURE_SERIALIZED_SIZE; /// (ECDSA,BLS12-377) key-pair pair. - #[cfg(feature = "full_crypto")] pub type Pair = super::Pair; /// (ECDSA,BLS12-377) public key pair. pub type Public = super::Public; @@ -64,16 +62,13 @@ pub mod ecdsa_bls377 { pub type Signature = super::Signature; impl super::CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } impl super::CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } - #[cfg(feature = "full_crypto")] impl super::CryptoType for Pair { type Pair = Pair; } @@ -136,7 +131,6 @@ pub mod ecdsa_bls377 { /// Secure seed length. /// /// Currently only supporting sub-schemes whose seed is a 32-bytes array. -#[cfg(feature = "full_crypto")] const SECURE_SEED_LEN: usize = 32; /// A secret seed. @@ -144,14 +138,12 @@ const SECURE_SEED_LEN: usize = 32; /// It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we /// will need it later (such as for HDKD). -#[cfg(feature = "full_crypto")] type Seed = [u8; SECURE_SEED_LEN]; /// A public key. 
#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq, PartialOrd, Ord)] pub struct Public([u8; LEFT_PLUS_RIGHT_LEN]); -#[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Public { fn hash(&self, state: &mut H) { self.0.hash(state); @@ -215,7 +207,6 @@ impl PassBy for Public { type PassBy = pass_by::Inner; } -#[cfg(feature = "full_crypto")] impl< LeftPair: PairT, RightPair: PairT, @@ -311,7 +302,6 @@ impl SignatureBound for T {} #[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Eq)] pub struct Signature([u8; LEFT_PLUS_RIGHT_LEN]); -#[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Signature { fn hash(&self, state: &mut H) { self.0.hash(state); @@ -411,7 +401,6 @@ impl UncheckedFrom<[u8; LEFT_PLUS_RIGHT_LEN]> } /// A key pair. -#[cfg(feature = "full_crypto")] #[derive(Clone)] pub struct Pair< LeftPair: PairT, @@ -423,7 +412,6 @@ pub struct Pair< right: RightPair, } -#[cfg(feature = "full_crypto")] impl< LeftPair: PairT, RightPair: PairT, @@ -483,6 +471,7 @@ where Self::Public::unchecked_from(raw) } + #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Self::Signature { let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; raw[..LeftPair::Signature::LEN].copy_from_slice(self.left.sign(message).as_ref()); diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index 7c02afc3cd5..6c04eb8def1 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -19,20 +19,14 @@ //! //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. -#[cfg(any(feature = "full_crypto", feature = "serde"))] -use crate::crypto::DeriveJunction; #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; +use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "full_crypto")] -use crate::crypto::{DeriveError, Pair as TraitPair, SecretStringError}; -#[cfg(feature = "full_crypto")] -use schnorrkel::{ - derive::CHAIN_CODE_LENGTH, signing_context, ExpansionMode, Keypair, MiniSecretKey, SecretKey, -}; -#[cfg(any(feature = "full_crypto", feature = "serde"))] +use schnorrkel::signing_context; use schnorrkel::{ - derive::{ChainCode, Derivation}, - PublicKey, + derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, + ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; use sp_std::vec::Vec; @@ -47,7 +41,6 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_std::ops::Deref; -#[cfg(feature = "full_crypto")] use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; #[cfg(feature = "serde")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; @@ -56,14 +49,12 @@ use sp_runtime_interface::pass_by::PassByInner; use sp_std::alloc::{format, string::String}; // signing context -#[cfg(feature = "full_crypto")] const SIGNING_CTX: &[u8] = b"substrate"; /// An identifier used to match public keys against sr25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. -#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( PartialEq, Eq, @@ -76,14 +67,13 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); PassByInner, MaxEncodedLen, TypeInfo, + Hash, )] pub struct Public(pub [u8; 32]); /// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. 
-#[cfg(feature = "full_crypto")] pub struct Pair(Keypair); -#[cfg(feature = "full_crypto")] impl Clone for Pair { fn clone(&self) -> Self { Pair(schnorrkel::Keypair { @@ -216,8 +206,7 @@ impl<'de> Deserialize<'de> for Public { } /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. -#[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq)] +#[derive(Encode, Decode, MaxEncodedLen, PassByInner, TypeInfo, PartialEq, Eq, Hash)] pub struct Signature(pub [u8; 64]); impl TryFrom<&[u8]> for Signature { @@ -435,16 +424,13 @@ impl AsRef for Pair { } /// Derive a single hard junction. -#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret: &SecretKey, cc: &[u8; CHAIN_CODE_LENGTH]) -> MiniSecretKey { secret.hard_derive_mini_secret_key(Some(ChainCode(*cc)), b"").0 } /// The raw secret seed, which can be used to recreate the `Pair`. -#[cfg(feature = "full_crypto")] type Seed = [u8; MINI_SECRET_KEY_LENGTH]; -#[cfg(feature = "full_crypto")] impl TraitPair for Pair { type Public = Public; type Seed = Seed; @@ -499,6 +485,7 @@ impl TraitPair for Pair { Ok((Self(result.into()), seed.map(|s| MiniSecretKey::to_bytes(&s)))) } + #[cfg(feature = "full_crypto")] fn sign(&self, message: &[u8]) -> Signature { let context = signing_context(SIGNING_CTX); self.0.sign(context.bytes(message)).into() @@ -533,16 +520,13 @@ impl Pair { } impl CryptoType for Public { - #[cfg(feature = "full_crypto")] type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] type Pair = Pair; } -#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 1c936b6685b..940fe90916d 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -18,10 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] strum = { version = "0.24.1", features = ["derive"], default-features = false } -sp-core = { path = "../core" } -sp-runtime = { path = "../runtime" } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } [features] +default = ["std"] +std = ["sp-core/std", "sp-runtime/std", "strum/std"] + # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. diff --git a/substrate/primitives/keyring/check-features-variants.sh b/substrate/primitives/keyring/check-features-variants.sh new file mode 100755 index 00000000000..9c28d835894 --- /dev/null +++ b/substrate/primitives/keyring/check-features-variants.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env -S bash -eux + +export RUSTFLAGS="-Cdebug-assertions=y -Dwarnings" +T=wasm32-unknown-unknown + +cargo check --release +cargo check --release --features="bandersnatch-experimental" +cargo check --release --target=$T --no-default-features diff --git a/substrate/primitives/keyring/src/bandersnatch.rs b/substrate/primitives/keyring/src/bandersnatch.rs index eb60f856327..67fc5c47df6 100644 --- a/substrate/primitives/keyring/src/bandersnatch.rs +++ b/substrate/primitives/keyring/src/bandersnatch.rs @@ -18,14 +18,21 @@ //! A set of well-known keys used for testing. 
pub use sp_core::bandersnatch; +#[cfg(feature = "std")] +use sp_core::bandersnatch::Signature; use sp_core::{ - bandersnatch::{Pair, Public, Signature}, + bandersnatch::{Pair, Public}, crypto::UncheckedFrom, hex2array, ByteArray, Pair as PairT, }; +extern crate alloc; +use alloc::{fmt, format, str::FromStr, string::String, vec::Vec}; + /// Set of test accounts. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd, +)] pub enum Keyring { Alice, Bob, @@ -56,6 +63,7 @@ impl Keyring { Public::from(self).to_raw_vec() } + #[cfg(feature = "std")] pub fn sign(self, msg: &[u8]) -> Signature { Pair::from(self).sign(msg) } @@ -102,16 +110,16 @@ impl From for &'static str { #[derive(Debug)] pub struct ParseKeyringError; -impl std::fmt::Display for ParseKeyringError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for ParseKeyringError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ParseKeyringError") } } -impl std::str::FromStr for Keyring { +impl FromStr for Keyring { type Err = ParseKeyringError; - fn from_str(s: &str) -> Result::Err> { + fn from_str(s: &str) -> Result::Err> { match s { "Alice" => Ok(Keyring::Alice), "Bob" => Ok(Keyring::Bob), diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index ade42b29494..98ca368e53c 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ -18,14 +18,21 @@ //! Support code for the runtime. A set of test accounts. pub use sp_core::ed25519; +#[cfg(feature = "std")] +use sp_core::ed25519::Signature; use sp_core::{ - ed25519::{Pair, Public, Signature}, + ed25519::{Pair, Public}, hex2array, ByteArray, Pair as PairT, H256, }; use sp_runtime::AccountId32; +extern crate alloc; +use alloc::{format, string::String, vec::Vec}; + /// Set of test accounts. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd, +)] pub enum Keyring { Alice, Bob, @@ -76,6 +83,7 @@ impl Keyring { self.to_raw_public().into() } + #[cfg(feature = "std")] pub fn sign(self, msg: &[u8]) -> Signature { Pair::from(self).sign(msg) } diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index ee7fd56ba11..f753bf4b0dd 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -17,6 +17,8 @@ //! Support code for the runtime. A set of test accounts. +#![cfg_attr(not(feature = "std"), no_std)] + /// Test account crypto for sr25519. pub mod sr25519; diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index 1c2a2526efb..a3a506152d7 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -18,15 +18,22 @@ //! Support code for the runtime. A set of test accounts. pub use sp_core::sr25519; +#[cfg(feature = "std")] +use sp_core::sr25519::Signature; use sp_core::{ hex2array, - sr25519::{Pair, Public, Signature}, + sr25519::{Pair, Public}, ByteArray, Pair as PairT, H256, }; use sp_runtime::AccountId32; +extern crate alloc; +use alloc::{fmt, format, str::FromStr, string::String, vec::Vec}; + /// Set of test accounts. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter, Ord, PartialOrd, +)] pub enum Keyring { Alice, Bob, @@ -77,6 +84,7 @@ impl Keyring { self.to_raw_public().into() } + #[cfg(feature = "std")] pub fn sign(self, msg: &[u8]) -> Signature { Pair::from(self).sign(msg) } @@ -140,16 +148,16 @@ impl From for sp_runtime::MultiSigner { #[derive(Debug)] pub struct ParseKeyringError; -impl std::fmt::Display for ParseKeyringError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for ParseKeyringError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ParseKeyringError") } } -impl std::str::FromStr for Keyring { +impl FromStr for Keyring { type Err = ParseKeyringError; - fn from_str(s: &str) -> Result::Err> { + fn from_str(s: &str) -> Result::Err> { match s { "alice" => Ok(Keyring::Alice), "bob" => Ok(Keyring::Bob), diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 3bba5cd5bf0..b6e6346e801 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -24,9 +24,9 @@ sp-block-builder = { path = "../../primitives/block-builder", default-features = codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } -sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-keyring = { path = "../../primitives/keyring", default-features = false } sp-offchain = { path = "../../primitives/offchain", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } sp-crypto-hashing = { path = "../../primitives/crypto/hashing", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } @@ -99,7 +99,7 @@ std = [ "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", - "sp-keyring", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", -- GitLab From 82f3c3e2e81d3ea4b9bc35a14432d3ab93fb5b52 Mon Sep 17 00:00:00 2001 From: gupnik Date: Wed, 13 Mar 2024 12:31:01 +0530 Subject: [PATCH 86/86] Construct Runtime v2 (#1378) Moved from https://github.com/paritytech/substrate/pull/14788 ---- Fixes https://github.com/paritytech/polkadot-sdk/issues/232 This PR introduces outer-macro approach for `construct_runtime` as discussed in the linked issue. 
It looks like the following: ```rust #[frame_support::runtime] mod runtime { #[runtime::runtime] #[runtime::derive( RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin, RuntimeFreezeReason, RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, RuntimeTask, )] pub struct Runtime; #[runtime::pallet_index(0)] pub type System = frame_system; #[runtime::pallet_index(1)] pub type Timestamp = pallet_timestamp; #[runtime::pallet_index(2)] pub type Aura = pallet_aura; #[runtime::pallet_index(3)] pub type Grandpa = pallet_grandpa; #[runtime::pallet_index(4)] pub type Balances = pallet_balances; #[runtime::pallet_index(5)] pub type TransactionPayment = pallet_transaction_payment; #[runtime::pallet_index(6)] pub type Sudo = pallet_sudo; // Include the custom logic from the pallet-template in the runtime. #[runtime::pallet_index(7)] pub type TemplateModule = pallet_template; } ``` ## Features - `#[runtime::runtime]` attached to a struct defines the main runtime - `#[runtime::derive]` attached to this struct defines the types generated by runtime - `#[runtime::pallet_index]` must be attached to a pallet to define its index - `#[runtime::disable_call]` can be optionally attached to a pallet to disable its calls - `#[runtime::disable_unsigned]` can be optionally attached to a pallet to disable unsigned calls - A pallet instance can be defined as `TemplateModule: pallet_template` - An optional attribute can be defined as `#[frame_support::runtime(legacy_ordering)]` to ensure that the order of hooks is same as the order of pallets (and not based on the pallet_index). This is to support legacy runtimes and should be avoided for new ones. ## Todo - [x] Update the latest syntax in kitchensink and tests - [x] Update UI tests - [x] Docs ## Extension - Abstract away the Executive similar to https://github.com/paritytech/substrate/pull/14742 - Optionally avoid the need to specify all runtime types (TBD) --------- Co-authored-by: Francisco Aguirre Co-authored-by: Nikhil Gupta <> --- prdoc/pr_1378.prdoc | 27 + substrate/bin/node/runtime/Cargo.toml | 2 +- substrate/bin/node/runtime/src/lib.rs | 342 ++++-- substrate/frame/support/Cargo.toml | 4 +- substrate/frame/support/procedural/Cargo.toml | 1 + .../procedural/src/construct_runtime/mod.rs | 15 +- substrate/frame/support/procedural/src/lib.rs | 68 ++ .../src/pallet/expand/tt_default_parts.rs | 74 ++ .../procedural/src/pallet/parse/helper.rs | 6 + .../procedural/src/runtime/expand/mod.rs | 320 ++++++ .../support/procedural/src/runtime/mod.rs | 236 +++++ .../procedural/src/runtime/parse/helper.rs | 37 + .../procedural/src/runtime/parse/mod.rs | 266 +++++ .../procedural/src/runtime/parse/pallet.rs | 99 ++ .../src/runtime/parse/pallet_decl.rs | 60 ++ .../src/runtime/parse/runtime_struct.rs | 35 + .../src/runtime/parse/runtime_types.rs | 76 ++ substrate/frame/support/src/lib.rs | 3 + substrate/frame/support/test/Cargo.toml | 2 +- .../{construct_runtime.rs => runtime.rs} | 282 +++-- .../test/tests/runtime_legacy_ordering.rs | 993 ++++++++++++++++++ .../frame/support/test/tests/runtime_ui.rs | 36 + .../runtime_ui/can_only_be_attached_to_mod.rs | 21 + .../can_only_be_attached_to_mod.stderr | 5 + .../runtime_ui/conflicting_pallet_index.rs | 43 + .../conflicting_pallet_index.stderr | 11 + .../runtime_ui/conflicting_pallet_name.rs | 43 + .../runtime_ui/conflicting_pallet_name.stderr | 11 + .../tests/runtime_ui/invalid_attribute.rs | 23 + .../tests/runtime_ui/invalid_attribute.stderr | 5 + .../tests/runtime_ui/invalid_pallet_index.rs | 28 + 
.../runtime_ui/invalid_pallet_index.stderr | 5 + .../runtime_ui/invalid_runtime_type_derive.rs | 25 + .../invalid_runtime_type_derive.stderr | 5 + .../test/tests/runtime_ui/missing_runtime.rs | 21 + .../tests/runtime_ui/missing_runtime.stderr | 5 + .../missing_runtime_types_derive.rs | 24 + .../missing_runtime_types_derive.stderr | 5 + .../tests/runtime_ui/missing_system_pallet.rs | 25 + .../runtime_ui/missing_system_pallet.stderr | 5 + .../test/tests/runtime_ui/pass/basic.rs | 37 + .../test/tests/runtime_ui/runtime_struct.rs | 24 + .../tests/runtime_ui/runtime_struct.stderr | 5 + templates/solochain/runtime/Cargo.toml | 2 +- templates/solochain/runtime/src/lib.rs | 55 +- 45 files changed, 3212 insertions(+), 205 deletions(-) create mode 100644 prdoc/pr_1378.prdoc create mode 100644 substrate/frame/support/procedural/src/runtime/expand/mod.rs create mode 100644 substrate/frame/support/procedural/src/runtime/mod.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/helper.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/mod.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/pallet.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs create mode 100644 substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs rename substrate/frame/support/test/tests/{construct_runtime.rs => runtime.rs} (89%) create mode 100644 substrate/frame/support/test/tests/runtime_legacy_ordering.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr create mode 100644 substrate/frame/support/test/tests/runtime_ui/pass/basic.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs create mode 100644 
substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr
diff --git a/prdoc/pr_1378.prdoc b/prdoc/pr_1378.prdoc
new file mode 100644
index 00000000000..6533dcb6630
--- /dev/null
+++ b/prdoc/pr_1378.prdoc
@@ -0,0 +1,27 @@
+title: Construct Runtime V2 - An outer macro approach to define the runtime
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces `#[frame_support::runtime]`, which can be attached to a mod to define a runtime. The key
+      components of the runtime are defined by attaching the following attributes to the items in this mod.
+      1. `#[runtime::runtime]` attached to a struct defines the main runtime
+      2. `#[runtime::derive]` attached to the runtime struct defines the types generated by the runtime
+      3. `#[runtime::pallet_index]` must be attached to a pallet to define its index
+      4. `#[runtime::disable_call]` can be optionally attached to a pallet to disable its calls
+      5. `#[runtime::disable_unsigned]` can be optionally attached to a pallet to disable unsigned calls
+      6. A pallet instance can be defined as `TemplateModule: pallet_template`
+      An optional attribute can be defined as `#[frame_support::runtime(legacy_ordering)]` to ensure that
+      the order of hooks is the same as the order of pallets (and not based on the pallet_index). This is to support
+      legacy runtimes and should be avoided for new ones.
+
+migrations:
+  db: []
+
+  runtime: []
+
+crates:
+  - name: frame-support
+  - name: frame-support-procedural
+
+host_functions: []
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index bdef42474d0..09c8fb4ed3d 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -58,7 +58,7 @@ sp-io = { path = "../../../primitives/io", default-features = false }
 frame-executive = { path = "../../../frame/executive", default-features = false }
 frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false }
 frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false }
-frame-support = { path = "../../../frame/support", default-features = false, features = ["tuples-96"] }
+frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] }
 frame-system = { path = "../../../frame/system", default-features = false }
 frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true }
 frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false }
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 18ce13cff80..96611f5a144 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -28,7 +28,7 @@ use frame_election_provider_support::{
 onchain, BalancingConfig, ElectionDataProvider, SequentialPhragmen, VoteWeight,
 };
 use frame_support::{
- construct_runtime, derive_impl,
+ derive_impl,
 dispatch::DispatchClass,
 dynamic_params::{dynamic_pallet_params, dynamic_params},
 genesis_builder_helper::{build_config, create_default_config},
@@ -2196,92 +2196,260 @@ impl pallet_parameters::Config for Runtime {
 type WeightInfo = ();
 }
 
-construct_runtime!(
- pub enum Runtime {
- System: frame_system,
- Utility: pallet_utility,
- Babe: pallet_babe,
- Timestamp: pallet_timestamp,
- // Authorship must be before session in order to note author in the correct session and era
- // for im-online and staking. 
- Authorship: pallet_authorship, - Indices: pallet_indices, - Balances: pallet_balances, - TransactionPayment: pallet_transaction_payment, - AssetTxPayment: pallet_asset_tx_payment, - AssetConversionTxPayment: pallet_asset_conversion_tx_payment, - ElectionProviderMultiPhase: pallet_election_provider_multi_phase, - Staking: pallet_staking, - Session: pallet_session, - Democracy: pallet_democracy, - Council: pallet_collective::, - TechnicalCommittee: pallet_collective::, - Elections: pallet_elections_phragmen, - TechnicalMembership: pallet_membership::, - Grandpa: pallet_grandpa, - Treasury: pallet_treasury, - AssetRate: pallet_asset_rate, - Contracts: pallet_contracts, - Sudo: pallet_sudo, - ImOnline: pallet_im_online, - AuthorityDiscovery: pallet_authority_discovery, - Offences: pallet_offences, - Historical: pallet_session_historical, - RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip, - Identity: pallet_identity, - Society: pallet_society, - Recovery: pallet_recovery, - Vesting: pallet_vesting, - Scheduler: pallet_scheduler, - Glutton: pallet_glutton, - Preimage: pallet_preimage, - Proxy: pallet_proxy, - Multisig: pallet_multisig, - Bounties: pallet_bounties, - Tips: pallet_tips, - Assets: pallet_assets::, - PoolAssets: pallet_assets::, - Beefy: pallet_beefy, - // MMR leaf construction must be after session in order to have a leaf's next_auth_set - // refer to block. See issue polkadot-fellows/runtimes#160 for details. - Mmr: pallet_mmr, - MmrLeaf: pallet_beefy_mmr, - Lottery: pallet_lottery, - Nis: pallet_nis, - Uniques: pallet_uniques, - Nfts: pallet_nfts, - NftFractionalization: pallet_nft_fractionalization, - Salary: pallet_salary, - CoreFellowship: pallet_core_fellowship, - TransactionStorage: pallet_transaction_storage, - VoterList: pallet_bags_list::, - StateTrieMigration: pallet_state_trie_migration, - ChildBounties: pallet_child_bounties, - Referenda: pallet_referenda, - Remark: pallet_remark, - RootTesting: pallet_root_testing, - ConvictionVoting: pallet_conviction_voting, - Whitelist: pallet_whitelist, - AllianceMotion: pallet_collective::, - Alliance: pallet_alliance, - NominationPools: pallet_nomination_pools, - RankedPolls: pallet_referenda::, - RankedCollective: pallet_ranked_collective, - AssetConversion: pallet_asset_conversion, - FastUnstake: pallet_fast_unstake, - MessageQueue: pallet_message_queue, - Pov: frame_benchmarking_pallet_pov, - TxPause: pallet_tx_pause, - SafeMode: pallet_safe_mode, - Statement: pallet_statement, - MultiBlockMigrations: pallet_migrations, - Broker: pallet_broker, - TasksExample: pallet_example_tasks, - Mixnet: pallet_mixnet, - Parameters: pallet_parameters, - SkipFeelessPayment: pallet_skip_feeless_payment, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + + #[runtime::pallet_index(1)] + pub type Utility = pallet_utility; + + #[runtime::pallet_index(2)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(3)] + pub type Timestamp = pallet_timestamp; + + // Authorship must be before session in order to note author in the correct session and era + // for im-online and staking. 
+ #[runtime::pallet_index(4)] + pub type Authorship = pallet_authorship; + + #[runtime::pallet_index(5)] + pub type Indices = pallet_indices; + + #[runtime::pallet_index(6)] + pub type Balances = pallet_balances; + + #[runtime::pallet_index(7)] + pub type TransactionPayment = pallet_transaction_payment; + + #[runtime::pallet_index(8)] + pub type AssetTxPayment = pallet_asset_tx_payment; + + #[runtime::pallet_index(9)] + pub type AssetConversionTxPayment = pallet_asset_conversion_tx_payment; + + #[runtime::pallet_index(10)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + #[runtime::pallet_index(11)] + pub type Staking = pallet_staking; + + #[runtime::pallet_index(12)] + pub type Session = pallet_session; + + #[runtime::pallet_index(13)] + pub type Democracy = pallet_democracy; + + #[runtime::pallet_index(14)] + pub type Council = pallet_collective; + + #[runtime::pallet_index(15)] + pub type TechnicalCommittee = pallet_collective; + + #[runtime::pallet_index(16)] + pub type Elections = pallet_elections_phragmen; + + #[runtime::pallet_index(17)] + pub type TechnicalMembership = pallet_membership; + + #[runtime::pallet_index(18)] + pub type Grandpa = pallet_grandpa; + + #[runtime::pallet_index(19)] + pub type Treasury = pallet_treasury; + + #[runtime::pallet_index(20)] + pub type AssetRate = pallet_asset_rate; + + #[runtime::pallet_index(21)] + pub type Contracts = pallet_contracts; + + #[runtime::pallet_index(22)] + pub type Sudo = pallet_sudo; + + #[runtime::pallet_index(23)] + pub type ImOnline = pallet_im_online; + + #[runtime::pallet_index(24)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + #[runtime::pallet_index(25)] + pub type Offences = pallet_offences; + + #[runtime::pallet_index(26)] + pub type Historical = pallet_session_historical; + + #[runtime::pallet_index(27)] + pub type RandomnessCollectiveFlip = pallet_insecure_randomness_collective_flip; + + #[runtime::pallet_index(28)] + pub type Identity = pallet_identity; + + #[runtime::pallet_index(29)] + pub type Society = pallet_society; + + #[runtime::pallet_index(30)] + pub type Recovery = pallet_recovery; + + #[runtime::pallet_index(31)] + pub type Vesting = pallet_vesting; + + #[runtime::pallet_index(32)] + pub type Scheduler = pallet_scheduler; + + #[runtime::pallet_index(33)] + pub type Glutton = pallet_glutton; + + #[runtime::pallet_index(34)] + pub type Preimage = pallet_preimage; + + #[runtime::pallet_index(35)] + pub type Proxy = pallet_proxy; + + #[runtime::pallet_index(36)] + pub type Multisig = pallet_multisig; + + #[runtime::pallet_index(37)] + pub type Bounties = pallet_bounties; + + #[runtime::pallet_index(38)] + pub type Tips = pallet_tips; + + #[runtime::pallet_index(39)] + pub type Assets = pallet_assets; + + #[runtime::pallet_index(40)] + pub type PoolAssets = pallet_assets; + + #[runtime::pallet_index(41)] + pub type Beefy = pallet_beefy; + + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. 
+ #[runtime::pallet_index(42)] + pub type Mmr = pallet_mmr; + + #[runtime::pallet_index(43)] + pub type MmrLeaf = pallet_beefy_mmr; + + #[runtime::pallet_index(44)] + pub type Lottery = pallet_lottery; + + #[runtime::pallet_index(45)] + pub type Nis = pallet_nis; + + #[runtime::pallet_index(46)] + pub type Uniques = pallet_uniques; + + #[runtime::pallet_index(47)] + pub type Nfts = pallet_nfts; + + #[runtime::pallet_index(48)] + pub type NftFractionalization = pallet_nft_fractionalization; + + #[runtime::pallet_index(49)] + pub type Salary = pallet_salary; + + #[runtime::pallet_index(50)] + pub type CoreFellowship = pallet_core_fellowship; + + #[runtime::pallet_index(51)] + pub type TransactionStorage = pallet_transaction_storage; + + #[runtime::pallet_index(52)] + pub type VoterList = pallet_bags_list; + + #[runtime::pallet_index(53)] + pub type StateTrieMigration = pallet_state_trie_migration; + + #[runtime::pallet_index(54)] + pub type ChildBounties = pallet_child_bounties; + + #[runtime::pallet_index(55)] + pub type Referenda = pallet_referenda; + + #[runtime::pallet_index(56)] + pub type Remark = pallet_remark; + + #[runtime::pallet_index(57)] + pub type RootTesting = pallet_root_testing; + + #[runtime::pallet_index(58)] + pub type ConvictionVoting = pallet_conviction_voting; + + #[runtime::pallet_index(59)] + pub type Whitelist = pallet_whitelist; + + #[runtime::pallet_index(60)] + pub type AllianceMotion = pallet_collective; + + #[runtime::pallet_index(61)] + pub type Alliance = pallet_alliance; + + #[runtime::pallet_index(62)] + pub type NominationPools = pallet_nomination_pools; + + #[runtime::pallet_index(63)] + pub type RankedPolls = pallet_referenda; + + #[runtime::pallet_index(64)] + pub type RankedCollective = pallet_ranked_collective; + + #[runtime::pallet_index(65)] + pub type AssetConversion = pallet_asset_conversion; + + #[runtime::pallet_index(66)] + pub type FastUnstake = pallet_fast_unstake; + + #[runtime::pallet_index(67)] + pub type MessageQueue = pallet_message_queue; + + #[runtime::pallet_index(68)] + pub type Pov = frame_benchmarking_pallet_pov; + + #[runtime::pallet_index(69)] + pub type TxPause = pallet_tx_pause; + + #[runtime::pallet_index(70)] + pub type SafeMode = pallet_safe_mode; + + #[runtime::pallet_index(71)] + pub type Statement = pallet_statement; + + #[runtime::pallet_index(72)] + pub type MultiBlockMigrations = pallet_migrations; + + #[runtime::pallet_index(73)] + pub type Broker = pallet_broker; + + #[runtime::pallet_index(74)] + pub type TasksExample = pallet_example_tasks; + + #[runtime::pallet_index(75)] + pub type Mixnet = pallet_mixnet; + + #[runtime::pallet_index(76)] + pub type Parameters = pallet_parameters; + + #[runtime::pallet_index(77)] + pub type SkipFeelessPayment = pallet_skip_feeless_payment; +} /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 113af41751e..be60068f122 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -109,7 +109,9 @@ try-runtime = [ "sp-debug-derive/force-debug", "sp-runtime/try-runtime", ] -experimental = [] +experimental = [ + "frame-support-procedural/experimental", +] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. 
no-metadata-docs = [ diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 85947503802..dd0688f2ad0 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -38,6 +38,7 @@ regex = "1" default = ["std"] std = ["sp-crypto-hashing/std"] no-metadata-docs = [] +experimental = [] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of # pallets in a runtime grows. Does increase the compile time! tuples-96 = [] diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 6e95fdf116a..1937dfa9ca4 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -208,8 +208,8 @@ //! This macro returns the ` :: expanded { Error }` list of additional parts we would like to //! expose. -mod expand; -mod parse; +pub(crate) mod expand; +pub(crate) mod parse; use crate::pallet::parse::helper::two128_str; use cfg_expr::Predicate; @@ -515,7 +515,7 @@ fn construct_runtime_final_expansion( Ok(res) } -fn decl_all_pallets<'a>( +pub(crate) fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator, features: &HashSet<&str>, @@ -624,7 +624,8 @@ fn decl_all_pallets<'a>( #( #all_pallets_without_system )* ) } -fn decl_pallet_runtime_setup( + +pub(crate) fn decl_pallet_runtime_setup( runtime: &Ident, pallet_declarations: &[Pallet], scrate: &TokenStream2, @@ -730,7 +731,7 @@ fn decl_pallet_runtime_setup( ) } -fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { +pub(crate) fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { quote!( #[cfg(test)] mod __construct_runtime_integrity_test { @@ -745,7 +746,7 @@ fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { ) } -fn decl_static_assertions( +pub(crate) fn decl_static_assertions( runtime: &Ident, pallet_decls: &[Pallet], scrate: &TokenStream2, @@ -776,7 +777,7 @@ fn decl_static_assertions( } } -fn check_pallet_number(input: TokenStream2, pallet_num: usize) -> Result<()> { +pub(crate) fn check_pallet_number(input: TokenStream2, pallet_num: usize) -> Result<()> { let max_pallet_num = { if cfg!(feature = "tuples-96") { 96 diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 89e39a5c9bb..dee6d522d25 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -31,6 +31,7 @@ mod match_and_insert; mod no_bound; mod pallet; mod pallet_error; +mod runtime; mod storage_alias; mod transactional; mod tt_macro; @@ -1220,6 +1221,73 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { .into() } +/// Construct a runtime, with the given name and the given pallets. +/// +/// # Example: +/// +/// ```ignore +/// #[frame_support::runtime] +/// mod runtime { +/// // The main runtime +/// #[runtime::runtime] +/// // Runtime Types to be generated +/// #[runtime::derive( +/// RuntimeCall, +/// RuntimeEvent, +/// RuntimeError, +/// RuntimeOrigin, +/// RuntimeFreezeReason, +/// RuntimeHoldReason, +/// RuntimeSlashReason, +/// RuntimeLockId, +/// RuntimeTask, +/// )] +/// pub struct Runtime; +/// +/// #[runtime::pallet_index(0)] +/// pub type System = frame_system; +/// +/// #[runtime::pallet_index(1)] +/// pub type Test = path::to::test; +/// +/// // Pallet with instance. 
+/// #[runtime::pallet_index(2)] +/// pub type Test2_Instance1 = test2; +/// +/// // Pallet with calls disabled. +/// #[runtime::pallet_index(3)] +/// #[runtime::disable_call] +/// pub type Test3 = test3; +/// +/// // Pallet with unsigned extrinsics disabled. +/// #[runtime::pallet_index(4)] +/// #[runtime::disable_unsigned] +/// pub type Test4 = test4; +/// } +/// ``` +/// +/// # Legacy Ordering +/// +/// An optional attribute can be defined as #[frame_support::runtime(legacy_ordering)] to +/// ensure that the order of hooks is same as the order of pallets (and not based on the +/// pallet_index). This is to support legacy runtimes and should be avoided for new ones. +/// +/// # Note +/// +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to their `Pallet`. E.g. `type System = +/// frame_system::Pallet` +#[cfg(feature = "experimental")] +#[proc_macro_attribute] +pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { + runtime::runtime(attr, item) +} + /// Mark a module that contains dynamic parameters. /// /// See the `pallet_parameters` for a full example. diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 7cc1415dfdd..99364aaa96c 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -28,6 +28,8 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { syn::Ident::new(&format!("__tt_default_parts_{}", count), def.item.span()); let extra_parts_unique_id = syn::Ident::new(&format!("__tt_extra_parts_{}", count), def.item.span()); + let default_parts_unique_id_v2 = + syn::Ident::new(&format!("__tt_default_parts_v2_{}", count), def.item.span()); let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); @@ -81,6 +83,58 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { .any(|c| matches!(c.composite_keyword, CompositeKeyword::SlashReason(_))) .then_some(quote::quote!(SlashReason,)); + let call_part_v2 = def.call.as_ref().map(|_| quote::quote!(+ Call)); + + let task_part_v2 = def.task_enum.as_ref().map(|_| quote::quote!(+ Task)); + + let storage_part_v2 = (!def.storages.is_empty()).then(|| quote::quote!(+ Storage)); + + let event_part_v2 = def.event.as_ref().map(|event| { + let gen = event.gen_kind.is_generic().then(|| quote::quote!()); + quote::quote!(+ Event #gen) + }); + + let error_part_v2 = def.error.as_ref().map(|_| quote::quote!(+ Error)); + + let origin_part_v2 = def.origin.as_ref().map(|origin| { + let gen = origin.is_generic.then(|| quote::quote!()); + quote::quote!(+ Origin #gen) + }); + + let config_part_v2 = def.genesis_config.as_ref().map(|genesis_config| { + let gen = genesis_config.gen_kind.is_generic().then(|| quote::quote!()); + quote::quote!(+ Config #gen) + }); + + let inherent_part_v2 = def.inherent.as_ref().map(|_| quote::quote!(+ Inherent)); + + let validate_unsigned_part_v2 = + def.validate_unsigned.as_ref().map(|_| quote::quote!(+ ValidateUnsigned)); + + let freeze_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::FreezeReason(_))) + .then_some(quote::quote!(+ 
FreezeReason)); + + let hold_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::HoldReason(_))) + .then_some(quote::quote!(+ HoldReason)); + + let lock_id_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::LockId(_))) + .then_some(quote::quote!(+ LockId)); + + let slash_reason_part_v2 = def + .composites + .iter() + .any(|c| matches!(c.composite_keyword, CompositeKeyword::SlashReason(_))) + .then_some(quote::quote!(+ SlashReason)); + quote::quote!( // This macro follows the conventions as laid out by the `tt-call` crate. It does not // accept any arguments and simply returns the pallet parts, separated by commas, then @@ -138,5 +192,25 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { } pub use #extra_parts_unique_id as tt_extra_parts; + + #[macro_export] + #[doc(hidden)] + macro_rules! #default_parts_unique_id_v2 { + { + $caller:tt + frame_support = [{ $($frame_support:ident)::* }] + } => { + $($frame_support)*::__private::tt_return! { + $caller + tokens = [{ + + Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2 + #inherent_part_v2 #validate_unsigned_part_v2 #freeze_reason_part_v2 #task_part_v2 + #hold_reason_part_v2 #lock_id_part_v2 #slash_reason_part_v2 + }] + } + }; + } + + pub use #default_parts_unique_id_v2 as tt_default_parts_v2; ) } diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 538226a8745..3187c9139c8 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -148,6 +148,12 @@ impl MutItemAttrs for syn::ImplItemFn { } } +impl MutItemAttrs for syn::ItemType { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(&mut self.attrs) + } +} + /// Parse for `()` struct Unit; impl syn::parse::Parse for Unit { diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs new file mode 100644 index 00000000000..93c88fce94b --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -0,0 +1,320 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::parse::runtime_types::RuntimeType; +use crate::{ + construct_runtime::{ + check_pallet_number, decl_all_pallets, decl_integrity_test, decl_pallet_runtime_setup, + decl_static_assertions, expand, + }, + runtime::{ + parse::{ + AllPalletsDeclaration, ExplicitAllPalletsDeclaration, ImplicitAllPalletsDeclaration, + }, + Def, + }, +}; +use cfg_expr::Predicate; +use frame_support_procedural_tools::{ + generate_access_from_frame_or_crate, generate_crate_access, generate_hidden_includes, +}; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use std::collections::HashSet; +use syn::{Ident, Result}; + +/// The fixed name of the system pallet. +const SYSTEM_PALLET_NAME: &str = "System"; + +pub fn expand(def: Def, legacy_ordering: bool) -> TokenStream2 { + let input = def.input; + + let (check_pallet_number_res, res) = match def.pallets { + AllPalletsDeclaration::Implicit(ref decl) => ( + check_pallet_number(input.clone(), decl.pallet_count), + construct_runtime_implicit_to_explicit(input.into(), decl.clone(), legacy_ordering), + ), + AllPalletsDeclaration::Explicit(ref decl) => ( + check_pallet_number(input, decl.pallets.len()), + construct_runtime_final_expansion( + def.runtime_struct.ident.clone(), + decl.clone(), + def.runtime_types.clone(), + legacy_ordering, + ), + ), + }; + + let res = res.unwrap_or_else(|e| e.to_compile_error()); + + // We want to provide better error messages to the user and thus, handle the error here + // separately. If there is an error, we print the error and still generate all of the code to + // get in overall less errors for the user. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote! { + #error + + #res + } + } else { + res + }; + + let res = expander::Expander::new("construct_runtime") + .dry(std::env::var("FRAME_EXPAND").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + + res.into() +} + +fn construct_runtime_implicit_to_explicit( + input: TokenStream2, + definition: ImplicitAllPalletsDeclaration, + legacy_ordering: bool, +) -> Result { + let frame_support = generate_access_from_frame_or_crate("frame-support")?; + let attr = if legacy_ordering { quote!((legacy_ordering)) } else { quote!() }; + let mut expansion = quote::quote!( + #[frame_support::runtime #attr] + #input + ); + for pallet in definition.pallet_decls.iter() { + let pallet_path = &pallet.path; + let pallet_name = &pallet.name; + let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(<#instance>)); + expansion = quote::quote!( + #frame_support::__private::tt_call! { + macro = [{ #pallet_path::tt_default_parts_v2 }] + frame_support = [{ #frame_support }] + ~~> #frame_support::match_and_insert! { + target = [{ #expansion }] + pattern = [{ #pallet_name = #pallet_path #pallet_instance }] + } + } + ); + } + + Ok(expansion) +} + +fn construct_runtime_final_expansion( + name: Ident, + definition: ExplicitAllPalletsDeclaration, + runtime_types: Vec, + legacy_ordering: bool, +) -> Result { + let ExplicitAllPalletsDeclaration { mut pallets, name: pallets_name } = definition; + + if !legacy_ordering { + // Ensure that order of hooks is based on the pallet index + pallets.sort_by_key(|p| p.index); + } + + let system_pallet = + pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { + syn::Error::new( + pallets_name.span(), + "`System` pallet declaration is missing. 
\ + Please add this line: `pub type System = frame_system;`", + ) + })?; + if !system_pallet.cfg_pattern.is_empty() { + return Err(syn::Error::new( + system_pallet.name.span(), + "`System` pallet declaration is feature gated, please remove any `#[cfg]` attributes", + )) + } + + let features = pallets + .iter() + .filter_map(|decl| { + (!decl.cfg_pattern.is_empty()).then(|| { + decl.cfg_pattern.iter().flat_map(|attr| { + attr.predicates().filter_map(|pred| match pred { + Predicate::Feature(feat) => Some(feat), + Predicate::Test => Some("test"), + _ => None, + }) + }) + }) + }) + .flatten() + .collect::>(); + + let hidden_crate_name = "construct_runtime"; + let scrate = generate_crate_access(hidden_crate_name, "frame-support"); + let scrate_decl = generate_hidden_includes(hidden_crate_name, "frame-support"); + + let frame_system = generate_access_from_frame_or_crate("frame-system")?; + let block = quote!(<#name as #frame_system::Config>::Block); + let unchecked_extrinsic = quote!(<#block as #scrate::sp_runtime::traits::Block>::Extrinsic); + + let mut dispatch = None; + let mut outer_event = None; + let mut outer_error = None; + let mut outer_origin = None; + let mut freeze_reason = None; + let mut hold_reason = None; + let mut slash_reason = None; + let mut lock_id = None; + let mut task = None; + + for runtime_type in runtime_types.iter() { + match runtime_type { + RuntimeType::RuntimeCall(_) => { + dispatch = + Some(expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate)); + }, + RuntimeType::RuntimeEvent(_) => { + outer_event = Some(expand::expand_outer_enum( + &name, + &pallets, + &scrate, + expand::OuterEnumType::Event, + )?); + }, + RuntimeType::RuntimeError(_) => { + outer_error = Some(expand::expand_outer_enum( + &name, + &pallets, + &scrate, + expand::OuterEnumType::Error, + )?); + }, + RuntimeType::RuntimeOrigin(_) => { + outer_origin = + Some(expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?); + }, + RuntimeType::RuntimeFreezeReason(_) => { + freeze_reason = Some(expand::expand_outer_freeze_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeHoldReason(_) => { + hold_reason = Some(expand::expand_outer_hold_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeSlashReason(_) => { + slash_reason = Some(expand::expand_outer_slash_reason(&pallets, &scrate)); + }, + RuntimeType::RuntimeLockId(_) => { + lock_id = Some(expand::expand_outer_lock_id(&pallets, &scrate)); + }, + RuntimeType::RuntimeTask(_) => { + task = Some(expand::expand_outer_task(&name, &pallets, &scrate)); + }, + } + } + + let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); + let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); + + let metadata = expand::expand_runtime_metadata( + &name, + &pallets, + &scrate, + &unchecked_extrinsic, + &system_pallet.path, + ); + let outer_config = expand::expand_outer_config(&name, &pallets, &scrate); + let inherent = + expand::expand_outer_inherent(&name, &block, &unchecked_extrinsic, &pallets, &scrate); + let validate_unsigned = expand::expand_outer_validate_unsigned(&name, &pallets, &scrate); + let integrity_test = decl_integrity_test(&scrate); + let static_assertions = decl_static_assertions(&name, &pallets, &scrate); + + let res = quote!( + #scrate_decl + + // Prevent UncheckedExtrinsic to print unused warning. 
+ const _: () = { + #[allow(unused)] + type __hidden_use_of_unchecked_extrinsic = #unchecked_extrinsic; + }; + + #[derive( + Clone, Copy, PartialEq, Eq, #scrate::sp_runtime::RuntimeDebug, + #scrate::__private::scale_info::TypeInfo + )] + pub struct #name; + impl #scrate::sp_runtime::traits::GetRuntimeBlockType for #name { + type RuntimeBlock = #block; + } + + // Each runtime must expose the `runtime_metadata()` to fetch the runtime API metadata. + // The function is implemented by calling `impl_runtime_apis!`. + // + // However, the `runtime` may be used without calling `impl_runtime_apis!`. + // Rely on the `Deref` trait to differentiate between a runtime that implements + // APIs (by macro impl_runtime_apis!) and a runtime that is simply created (by macro runtime). + // + // Both `InternalConstructRuntime` and `InternalImplRuntimeApis` expose a `runtime_metadata()` function. + // `InternalConstructRuntime` is implemented by the `runtime` for Runtime references (`& Runtime`), + // while `InternalImplRuntimeApis` is implemented by the `impl_runtime_apis!` for Runtime (`Runtime`). + // + // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` + // when both macros are called; and will resolve an empty `runtime_metadata` when only the `runtime` + // is used. + + #[doc(hidden)] + trait InternalConstructRuntime { + #[inline(always)] + fn runtime_metadata(&self) -> #scrate::__private::sp_std::vec::Vec<#scrate::__private::metadata_ir::RuntimeApiMetadataIR> { + Default::default() + } + } + #[doc(hidden)] + impl InternalConstructRuntime for &#name {} + + #outer_event + + #outer_error + + #outer_origin + + #all_pallets + + #pallet_to_index + + #dispatch + + #task + + #metadata + + #outer_config + + #inherent + + #validate_unsigned + + #freeze_reason + + #hold_reason + + #lock_id + + #slash_reason + + #integrity_test + + #static_assertions + ); + + Ok(res) +} diff --git a/substrate/frame/support/procedural/src/runtime/mod.rs b/substrate/frame/support/procedural/src/runtime/mod.rs new file mode 100644 index 00000000000..aaae579eb08 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/mod.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of `runtime`. +//! +//! `runtime` implementation is recursive and can generate code which will call itself +//! in order to get all the pallet parts for each pallet. +//! +//! Pallets can define their parts: +//! - Implicitly: `pub type System = frame_system;` +//! - Explicitly: `pub type System = frame_system + Pallet + Call;` +//! +//! The `runtime` transitions from the implicit definition to the explicit one. +//! From the explicit state, Substrate expands the pallets with additional information +//! that is to be included in the runtime metadata. +//! +//! Pallets must provide the `tt_default_parts_v2` macro for these transitions. +//! 
These are automatically implemented by the `#[pallet::pallet]` macro.
+//!
+//! This macro also generates the following enums for ease of decoding if the respective type
+//! is defined inside `#[runtime::derive]`:
+//! - `enum RuntimeCall`: This type contains the information needed to decode extrinsics.
+//! - `enum RuntimeEvent`: This type contains the information needed to decode events.
+//! - `enum RuntimeError`: While this cannot be used directly to decode `sp_runtime::DispatchError`
+//! from the chain, it contains the information needed to decode the
+//! `sp_runtime::DispatchError::Module`.
+//!
+//! # State Transitions
+//!
+//! ```ignore
+//! +----------+
+//! | Implicit |
+//! +----------+
+//! |
+//! v
+//! +----------+
+//! | Explicit |
+//! +----------+
+//! ```
+//!
+//! The `runtime` macro transforms the implicit declaration of each pallet
+//! `System: frame_system` to an explicit one `System: frame_system + Pallet + Call` using the
+//! `tt_default_parts_v2` macro.
+//!
+//! The `tt_default_parts_v2` macro exposes a plus-separated list of pallet parts. For example, the
+//! `Event` part is exposed only if the pallet implements an event via `#[pallet::event]` macro.
+//! The tokens generated by this macro are `+ Pallet + Call` for our example.
+//!
+//! The `match_and_insert` macro takes in 3 arguments:
+//! - target: This is the `TokenStream` that contains the `runtime` macro.
+//! - pattern: The pattern to match against in the target stream.
+//! - tokens: The tokens to be added after the pattern match.
+//!
+//! The `runtime` macro uses the `tt_call` to get the default pallet parts via
+//! the `tt_default_parts_v2` macro defined by each pallet. The pallet parts are then returned as
+//! input to the `match_and_insert` macro.
+//! The `match_and_insert` macro will then modify the `runtime` to expand the implicit
+//! definition to the explicit one.
+//!
+//! For example,
+//!
+//! ```ignore
+//! #[frame_support::runtime]
+//! mod runtime {
+//! //...
+//!
+//! #[runtime::pallet_index(0)]
+//! pub type System = frame_system; // Implicit definition of parts
+//!
+//! #[runtime::pallet_index(1)]
+//! pub type Balances = pallet_balances; // Implicit definition of parts
+//! }
+//! ```
+//! This call has some implicit pallet parts, thus it will expand to:
+//! ```ignore
+//! frame_support::__private::tt_call! {
+//! macro = [{ pallet_balances::tt_default_parts_v2 }]
+//! ~~> frame_support::match_and_insert! {
+//! target = [{
+//! frame_support::__private::tt_call! {
+//! macro = [{ frame_system::tt_default_parts_v2 }]
+//! ~~> frame_support::match_and_insert! {
+//! target = [{
+//! #[frame_support::runtime]
+//! mod runtime {
+//! //...
+//!
+//! #[runtime::pallet_index(0)]
+//! pub type System = frame_system;
+//!
+//! #[runtime::pallet_index(1)]
+//! pub type Balances = pallet_balances;
+//! }
+//! }]
+//! pattern = [{ System = frame_system }]
+//! }
+//! }
+//! }]
+//! pattern = [{ Balances = pallet_balances }]
+//! }
+//! }
+//! ```
+//! `tt_default_parts_v2` must be defined. It returns the pallet parts inside some tokens, and
+//! then `tt_call` will pipe the returned pallet parts into the input of `match_and_insert`.
+//! Thus `match_and_insert` will initially receive the following inputs:
+//! ```ignore
+//! frame_support::match_and_insert! {
+//! target = [{
+//! frame_support::match_and_insert! {
+//! target = [{
+//! #[frame_support::runtime]
+//! mod runtime {
+//! //...
+//!
+//! #[runtime::pallet_index(0)]
+//! pub type System = frame_system;
+//!
+//! 
#[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances; +//! } +//! }] +//! pattern = [{ System = frame_system }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! }] +//! pattern = [{ Balances = pallet_balances }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! ``` +//! After dealing with `pallet_balances`, the inner `match_and_insert` will expand to: +//! ```ignore +//! frame_support::match_and_insert! { +//! target = [{ +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system; // Implicit definition of parts +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances + Pallet + Call; // Explicit definition of parts +//! } +//! }] +//! pattern = [{ System = frame_system }] +//! tokens = [{ ::{+ Pallet + Call} }] +//! } +//! ``` +//! +//! Which will then finally expand to the following: +//! ```ignore +//! #[frame_support::runtime] +//! mod runtime { +//! //... +//! +//! #[runtime::pallet_index(0)] +//! pub type System = frame_system + Pallet + Call; +//! +//! #[runtime::pallet_index(1)] +//! pub type Balances = pallet_balances + Pallet + Call; +//! } +//! ``` +//! +//! This call has no implicit pallet parts, thus it will expand to the runtime construction: +//! ```ignore +//! pub struct Runtime { ... } +//! pub struct Call { ... } +//! impl Call ... +//! pub enum Origin { ... } +//! ... +//! ``` +//! +//! Visualizing the entire flow of `#[frame_support::runtime]`, it would look like the following: +//! +//! ```ignore +//! +----------------------+ +------------------------+ +-------------------+ +//! | | | (defined in pallet) | | | +//! | runtime | --> | tt_default_parts_v2! | --> | match_and_insert! | +//! | w/ no pallet parts | | | | | +//! +----------------------+ +------------------------+ +-------------------+ +//! +//! +----------------------+ +//! | | +//! --> | runtime | +//! | w/ pallet parts | +//! +----------------------+ +//! ``` + +#![cfg(feature = "experimental")] + +pub use parse::Def; +use proc_macro::TokenStream; +use syn::spanned::Spanned; + +mod expand; +mod parse; + +mod keyword { + syn::custom_keyword!(legacy_ordering); +} + +pub fn runtime(attr: TokenStream, tokens: TokenStream) -> TokenStream { + let mut legacy_ordering = false; + if !attr.is_empty() { + if let Ok(_) = syn::parse::(attr.clone()) { + legacy_ordering = true; + } else { + let msg = "Invalid runtime macro call: unexpected attribute. Macro call must be \ + bare, such as `#[frame_support::runtime]` or `#[runtime]`, or must specify the \ + `legacy_ordering` attribute, such as `#[frame_support::runtime(legacy_ordering)]` or \ + #[runtime(legacy_ordering)]."; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } + } + + let item = syn::parse_macro_input!(tokens as syn::ItemMod); + match parse::Def::try_from(item) { + Ok(def) => expand::expand(def, legacy_ordering).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/helper.rs b/substrate/frame/support/procedural/src/runtime/parse/helper.rs new file mode 100644 index 00000000000..f05395f9b7a --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/helper.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::parse::helper::MutItemAttrs; +use quote::ToTokens; + +pub(crate) fn take_first_item_runtime_attr( + item: &mut impl MutItemAttrs, +) -> syn::Result> +where + Attr: syn::parse::Parse, +{ + let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; + + if let Some(index) = attrs.iter().position(|attr| { + attr.path().segments.first().map_or(false, |segment| segment.ident == "runtime") + }) { + let runtime_attr = attrs.remove(index); + Ok(Some(syn::parse2(runtime_attr.into_token_stream())?)) + } else { + Ok(None) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs new file mode 100644 index 00000000000..893cb4726e2 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs @@ -0,0 +1,266 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod helper; +pub mod pallet; +pub mod pallet_decl; +pub mod runtime_struct; +pub mod runtime_types; + +use crate::construct_runtime::parse::Pallet; +use pallet_decl::PalletDeclaration; +use proc_macro2::TokenStream as TokenStream2; +use quote::ToTokens; +use std::collections::HashMap; +use syn::{spanned::Spanned, Ident, Token}; + +use frame_support_procedural_tools::syn_ext as ext; +use runtime_types::RuntimeType; + +mod keyword { + use syn::custom_keyword; + + custom_keyword!(runtime); + custom_keyword!(derive); + custom_keyword!(pallet_index); + custom_keyword!(disable_call); + custom_keyword!(disable_unsigned); +} + +enum RuntimeAttr { + Runtime(proc_macro2::Span), + Derive(proc_macro2::Span, Vec), + PalletIndex(proc_macro2::Span, u8), + DisableCall(proc_macro2::Span), + DisableUnsigned(proc_macro2::Span), +} + +impl RuntimeAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::Runtime(span) => *span, + Self::Derive(span, _) => *span, + Self::PalletIndex(span, _) => *span, + Self::DisableCall(span) => *span, + Self::DisableUnsigned(span) => *span, + } + } +} + +impl syn::parse::Parse for RuntimeAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::runtime) { + Ok(RuntimeAttr::Runtime(content.parse::()?.span())) + } else if lookahead.peek(keyword::derive) { + let _ = content.parse::(); + let derive_content; + syn::parenthesized!(derive_content in content); + let runtime_types = + derive_content.parse::>()?; + let runtime_types = runtime_types.inner.into_iter().collect(); + Ok(RuntimeAttr::Derive(derive_content.span(), runtime_types)) + } else if lookahead.peek(keyword::pallet_index) { + let _ = content.parse::(); + let pallet_index_content; + syn::parenthesized!(pallet_index_content in content); + let pallet_index = pallet_index_content.parse::()?; + if !pallet_index.suffix().is_empty() { + let msg = "Number literal must not have a suffix"; + return Err(syn::Error::new(pallet_index.span(), msg)) + } + Ok(RuntimeAttr::PalletIndex(pallet_index.span(), pallet_index.base10_parse()?)) + } else if lookahead.peek(keyword::disable_call) { + Ok(RuntimeAttr::DisableCall(content.parse::()?.span())) + } else if lookahead.peek(keyword::disable_unsigned) { + Ok(RuntimeAttr::DisableUnsigned(content.parse::()?.span())) + } else { + Err(lookahead.error()) + } + } +} + +#[derive(Debug, Clone)] +pub enum AllPalletsDeclaration { + Implicit(ImplicitAllPalletsDeclaration), + Explicit(ExplicitAllPalletsDeclaration), +} + +/// Declaration of a runtime with some pallet with implicit declaration of parts. +#[derive(Debug, Clone)] +pub struct ImplicitAllPalletsDeclaration { + pub name: Ident, + pub pallet_decls: Vec, + pub pallet_count: usize, +} + +/// Declaration of a runtime with all pallet having explicit declaration of parts. 
+#[derive(Debug, Clone)] +pub struct ExplicitAllPalletsDeclaration { + pub name: Ident, + pub pallets: Vec, +} + +pub struct Def { + pub input: TokenStream2, + pub item: syn::ItemMod, + pub runtime_struct: runtime_struct::RuntimeStructDef, + pub pallets: AllPalletsDeclaration, + pub runtime_types: Vec, +} + +impl Def { + pub fn try_from(mut item: syn::ItemMod) -> syn::Result { + let input: TokenStream2 = item.to_token_stream().into(); + let item_span = item.span(); + let items = &mut item + .content + .as_mut() + .ok_or_else(|| { + let msg = "Invalid runtime definition, expected mod to be inlined."; + syn::Error::new(item_span, msg) + })? + .1; + + let mut runtime_struct = None; + let mut runtime_types = None; + + let mut indices = HashMap::new(); + let mut names = HashMap::new(); + + let mut pallet_decls = vec![]; + let mut pallets = vec![]; + + for item in items.iter_mut() { + let mut pallet_item = None; + let mut pallet_index = 0; + + let mut disable_call = false; + let mut disable_unsigned = false; + + while let Some(runtime_attr) = + helper::take_first_item_runtime_attr::(item)? + { + match runtime_attr { + RuntimeAttr::Runtime(span) if runtime_struct.is_none() => { + let p = runtime_struct::RuntimeStructDef::try_from(span, item)?; + runtime_struct = Some(p); + }, + RuntimeAttr::Derive(_, types) if runtime_types.is_none() => { + runtime_types = Some(types); + }, + RuntimeAttr::PalletIndex(span, index) => { + pallet_index = index; + pallet_item = if let syn::Item::Type(item) = item { + Some(item.clone()) + } else { + let msg = "Invalid runtime::pallet_index, expected type definition"; + return Err(syn::Error::new(span, msg)) + }; + }, + RuntimeAttr::DisableCall(_) => disable_call = true, + RuntimeAttr::DisableUnsigned(_) => disable_unsigned = true, + attr => { + let msg = "Invalid duplicated attribute"; + return Err(syn::Error::new(attr.span(), msg)) + }, + } + } + + if let Some(pallet_item) = pallet_item { + match *pallet_item.ty.clone() { + syn::Type::Path(ref path) => { + let pallet_decl = + PalletDeclaration::try_from(item.span(), &pallet_item, path)?; + + if let Some(used_pallet) = + names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) + { + let msg = "Two pallets with the same name!"; + + let mut err = syn::Error::new(used_pallet, &msg); + err.combine(syn::Error::new(pallet_decl.name.span(), &msg)); + return Err(err) + } + + pallet_decls.push(pallet_decl); + }, + syn::Type::TraitObject(syn::TypeTraitObject { bounds, .. }) => { + let pallet = Pallet::try_from( + item.span(), + &pallet_item, + pallet_index, + disable_call, + disable_unsigned, + &bounds, + )?; + + if let Some(used_pallet) = indices.insert(pallet.index, pallet.name.clone()) + { + let msg = format!( + "Pallet indices are conflicting: Both pallets {} and {} are at index {}", + used_pallet, pallet.name, pallet.index, + ); + let mut err = syn::Error::new(used_pallet.span(), &msg); + err.combine(syn::Error::new(pallet.name.span(), msg)); + return Err(err) + } + + pallets.push(pallet); + }, + _ => continue, + } + } + } + + let name = item.ident.clone(); + let decl_count = pallet_decls.len(); + let pallets = if decl_count > 0 { + AllPalletsDeclaration::Implicit(ImplicitAllPalletsDeclaration { + name, + pallet_decls, + pallet_count: decl_count.saturating_add(pallets.len()), + }) + } else { + AllPalletsDeclaration::Explicit(ExplicitAllPalletsDeclaration { name, pallets }) + }; + + let def = Def { + input, + item, + runtime_struct: runtime_struct.ok_or_else(|| { + syn::Error::new(item_span, + "Missing Runtime. 
Please add a struct inside the module and annotate it with `#[runtime::runtime]`" + ) + })?, + pallets, + runtime_types: runtime_types.ok_or_else(|| { + syn::Error::new(item_span, + "Missing Runtime Types. Please annotate the runtime struct with `#[runtime::derive]`" + ) + })?, + }; + + Ok(def) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs new file mode 100644 index 00000000000..d2f1857fb2b --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -0,0 +1,99 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath}; +use quote::ToTokens; +use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments}; + +impl Pallet { + pub fn try_from( + attr_span: proc_macro2::Span, + item: &syn::ItemType, + pallet_index: u8, + disable_call: bool, + disable_unsigned: bool, + bounds: &Punctuated, + ) -> syn::Result { + let name = item.ident.clone(); + + let mut pallet_path = None; + let mut pallet_parts = vec![]; + + for (index, bound) in bounds.into_iter().enumerate() { + if let syn::TypeParamBound::Trait(syn::TraitBound { path, .. }) = bound { + if index == 0 { + pallet_path = Some(PalletPath { inner: path.clone() }); + } else { + let pallet_part = syn::parse2::(bound.into_token_stream())?; + pallet_parts.push(pallet_part); + } + } else { + return Err(Error::new( + attr_span, + "Invalid pallet declaration, expected a path or a trait object", + )) + }; + } + + let mut path = pallet_path.ok_or(Error::new( + attr_span, + "Invalid pallet declaration, expected a path or a trait object", + ))?; + + let mut instance = None; + if let Some(segment) = path.inner.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) + { + if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { + args, .. 
+ }) = segment.arguments.clone() + { + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { + instance = + Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); + segment.arguments = PathArguments::None; + } + } + } + + pallet_parts = pallet_parts + .into_iter() + .filter(|part| { + if let (true, &PalletPartKeyword::Call(_)) = (disable_call, &part.keyword) { + false + } else if let (true, &PalletPartKeyword::ValidateUnsigned(_)) = + (disable_unsigned, &part.keyword) + { + false + } else { + true + } + }) + .collect(); + + let cfg_pattern = vec![]; + + Ok(Pallet { + is_expanded: true, + name, + index: pallet_index, + path, + instance, + cfg_pattern, + pallet_parts, + }) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs new file mode 100644 index 00000000000..437a163cfbc --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs @@ -0,0 +1,60 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use quote::ToTokens; +use syn::{spanned::Spanned, Attribute, Ident, PathArguments}; + +/// The declaration of a pallet. +#[derive(Debug, Clone)] +pub struct PalletDeclaration { + /// The name of the pallet, e.g.`System` in `System: frame_system`. + pub name: Ident, + /// Optional attributes tagged right above a pallet declaration. + pub attrs: Vec, + /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. + pub path: syn::Path, + /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::`. + pub instance: Option, +} + +impl PalletDeclaration { + pub fn try_from( + _attr_span: proc_macro2::Span, + item: &syn::ItemType, + path: &syn::TypePath, + ) -> syn::Result { + let name = item.ident.clone(); + + let mut path = path.path.clone(); + + let mut instance = None; + if let Some(segment) = path.segments.iter_mut().find(|seg| !seg.arguments.is_empty()) { + if let PathArguments::AngleBracketed(syn::AngleBracketedGenericArguments { + args, .. + }) = segment.arguments.clone() + { + if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.first() { + instance = + Some(Ident::new(&arg_path.to_token_stream().to_string(), arg_path.span())); + segment.arguments = PathArguments::None; + } + } + } + + Ok(Self { name, path, instance, attrs: item.attrs.clone() }) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs new file mode 100644 index 00000000000..8fa746ee807 --- /dev/null +++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_struct.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use syn::spanned::Spanned;
+pub struct RuntimeStructDef {
+    pub ident: syn::Ident,
+    pub attr_span: proc_macro2::Span,
+}
+
+impl RuntimeStructDef {
+    pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result<Self> {
+        let item = if let syn::Item::Struct(item) = item {
+            item
+        } else {
+            let msg = "Invalid runtime::runtime, expected struct definition";
+            return Err(syn::Error::new(item.span(), msg))
+        };
+
+        Ok(Self { ident: item.ident.clone(), attr_span })
+    }
+}
diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs
new file mode 100644
index 00000000000..a4480e2a1fd
--- /dev/null
+++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs
@@ -0,0 +1,76 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
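For orientation, the `Runtime*` keywords parsed below are the identifiers accepted inside `#[runtime::derive(..)]`. A minimal sketch of the surface syntax they belong to, mirroring the test runtimes added later in this patch (illustrative sketch only):

#[frame_support::runtime]
mod runtime {
    // The runtime struct carries the derive list of runtime-level types to generate.
    #[runtime::runtime]
    #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin)]
    pub struct Runtime;

    // Each pallet is a type alias with an explicit index; `disable_call` and
    // `disable_unsigned` opt the pallet out of call dispatch and unsigned validation.
    #[runtime::pallet_index(0)]
    pub type System = frame_system;
}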
+ +use syn::{ + parse::{Parse, ParseStream}, + Result, +}; + +mod keyword { + use syn::custom_keyword; + + custom_keyword!(RuntimeCall); + custom_keyword!(RuntimeEvent); + custom_keyword!(RuntimeError); + custom_keyword!(RuntimeOrigin); + custom_keyword!(RuntimeFreezeReason); + custom_keyword!(RuntimeHoldReason); + custom_keyword!(RuntimeSlashReason); + custom_keyword!(RuntimeLockId); + custom_keyword!(RuntimeTask); +} + +#[derive(Debug, Clone, PartialEq)] +pub enum RuntimeType { + RuntimeCall(keyword::RuntimeCall), + RuntimeEvent(keyword::RuntimeEvent), + RuntimeError(keyword::RuntimeError), + RuntimeOrigin(keyword::RuntimeOrigin), + RuntimeFreezeReason(keyword::RuntimeFreezeReason), + RuntimeHoldReason(keyword::RuntimeHoldReason), + RuntimeSlashReason(keyword::RuntimeSlashReason), + RuntimeLockId(keyword::RuntimeLockId), + RuntimeTask(keyword::RuntimeTask), +} + +impl Parse for RuntimeType { + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + + if lookahead.peek(keyword::RuntimeCall) { + Ok(Self::RuntimeCall(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeEvent) { + Ok(Self::RuntimeEvent(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeError) { + Ok(Self::RuntimeError(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeOrigin) { + Ok(Self::RuntimeOrigin(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeFreezeReason) { + Ok(Self::RuntimeFreezeReason(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeHoldReason) { + Ok(Self::RuntimeHoldReason(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeSlashReason) { + Ok(Self::RuntimeSlashReason(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeLockId) { + Ok(Self::RuntimeLockId(input.parse()?)) + } else if lookahead.peek(keyword::RuntimeTask) { + Ok(Self::RuntimeTask(input.parse()?)) + } else { + Err(lookahead.error()) + } + } +} diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 07560abd935..af8449c9697 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -510,6 +510,9 @@ pub use frame_support_procedural::{ construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; +#[cfg(feature = "experimental")] +pub use frame_support_procedural::runtime; + #[doc(hidden)] pub use frame_support_procedural::{__create_tt_macro, __generate_dummy_part_checker}; diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index ae2c56a531f..2f12cc00ed9 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -24,7 +24,7 @@ sp-api = { path = "../../../primitives/api", default-features = false } sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } sp-state-machine = { path = "../../../primitives/state-machine", optional = true } -frame-support = { path = "..", default-features = false } +frame-support = { path = "..", default-features = false, features = ["experimental"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/runtime.rs similarity index 89% rename from 
substrate/frame/support/test/tests/construct_runtime.rs rename to substrate/frame/support/test/tests/runtime.rs index 83691451ccd..7e33ee46458 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -30,7 +30,7 @@ use scale_info::TypeInfo; use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, - traits::{BlakeTwo256, Verify}, + traits::{BlakeTwo256, ValidateUnsigned, Verify}, DispatchError, ModuleError, }; use sp_version::RuntimeVersion; @@ -176,6 +176,17 @@ mod nested { impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } } } @@ -247,6 +258,20 @@ pub mod module3 { impl BuildGenesisConfig for GenesisConfig { fn build(&self) {} } + + #[pallet::storage] + pub type Storage = StorageValue<_, u32>; + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } } pub type BlockNumber = u64; @@ -256,24 +281,64 @@ pub type Header = generic::Header; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type Block = generic::Block; -frame_support::construct_runtime!( - pub struct Runtime - { - System: frame_system::{Pallet, Call, Event, Origin} = 30, - Module1_1: module1::::{Pallet, Call, Storage, Event, Origin}, - Module2: module2::{Pallet, Call, Storage, Event, Origin}, - Module1_2: module1::::{Pallet, Call, Storage, Event, Origin}, - NestedModule3: nested::module3::{Pallet, Call, Config, Storage, Event, Origin}, - Module3: self::module3::{Pallet, Call, Config, Storage, Event, Origin}, - Module1_3: module1::::{Pallet, Storage, Event } = 6, - Module1_4: module1::::{Pallet, Call, Event } = 3, - Module1_5: module1::::{Pallet, Event}, - Module1_6: module1::::{Pallet, Call, Storage, Event, Origin} = 1, - Module1_7: module1::::{Pallet, Call, Storage, Event, Origin}, - Module1_8: module1::::{Pallet, Call, Storage, Event, Origin} = 12, - Module1_9: module1::::{Pallet, Call, Storage, Event, Origin}, - } -); +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(30)] + pub type System = frame_system + Pallet + Call + Event + Origin; + + #[runtime::pallet_index(31)] + pub type Module1_1 = module1; + + #[runtime::pallet_index(32)] + pub type Module2 = module2; + + #[runtime::pallet_index(33)] + pub type Module1_2 = module1; + + #[runtime::pallet_index(34)] + pub type NestedModule3 = nested::module3; + + #[runtime::pallet_index(35)] + #[runtime::disable_unsigned] + pub type Module3 = self::module3; + + #[runtime::pallet_index(6)] + #[runtime::disable_call] + pub type Module1_3 = module1; + + #[runtime::pallet_index(3)] + pub type Module1_4 = module1; + + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type Module1_5 = module1; + + #[runtime::pallet_index(1)] + pub type Module1_6 = module1; + + #[runtime::pallet_index(2)] + pub type Module1_7 = module1; + + #[runtime::pallet_index(12)] + pub type 
Module1_8 = module1; + + #[runtime::pallet_index(13)] + pub type Module1_9 = module1; +} #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { @@ -605,17 +670,17 @@ fn get_module_names() { let module_names = RuntimeCall::get_module_names(); assert_eq!( [ + "Module1_6", + "Module1_7", + "Module1_4", + "Module1_8", + "Module1_9", "System", "Module1_1", "Module2", "Module1_2", "NestedModule3", "Module3", - "Module1_4", - "Module1_6", - "Module1_7", - "Module1_8", - "Module1_9", ], module_names ); @@ -636,9 +701,13 @@ fn call_subtype_conversion() { #[test] fn test_metadata() { - use frame_metadata::{v14::*, *}; + use frame_metadata::{ + v14::{StorageEntryType::Plain, *}, + *, + }; use scale_info::meta_type; use sp_core::Encode; + use sp_metadata_ir::StorageEntryModifierIR::Optional; fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { if cfg!(feature = "no-metadata-docs") { @@ -649,6 +718,69 @@ fn test_metadata() { } let pallets = vec![ + PalletMetadata { + name: "Module1_6", + storage:None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 1, + }, + PalletMetadata { + name: "Module1_7", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 2, + }, + PalletMetadata { + name: "Module1_4", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 3, + }, + PalletMetadata { + name: "Module1_5", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 4, + }, + PalletMetadata { + name: "Module1_3", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 6, + }, + PalletMetadata { + name: "Module1_8", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 12, + }, + PalletMetadata { + name: "Module1_9", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 13, + }, PalletMetadata { name: "System", storage: None, @@ -703,7 +835,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module1_1", - storage: Some(PalletStorageMetadata { prefix: "Module1_1", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -712,7 +844,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module2", - storage: Some(PalletStorageMetadata { prefix: "Module2", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -721,7 +853,7 @@ fn test_metadata() { }, PalletMetadata { name: "Module1_2", - storage: Some(PalletStorageMetadata { prefix: "Module1_2", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], @@ -730,7 +862,7 @@ fn test_metadata() { }, PalletMetadata { name: "NestedModule3", - storage: Some(PalletStorageMetadata { prefix: "NestedModule3", entries: vec![] }), + storage: None, calls: Some(meta_type::>().into()), event: 
Some(meta_type::>().into()), constants: vec![], @@ -739,76 +871,24 @@ fn test_metadata() { }, PalletMetadata { name: "Module3", - storage: Some(PalletStorageMetadata { prefix: "Module3", entries: vec![] }), + storage: Some(PalletStorageMetadata { + prefix: "Module3", + entries: vec![ + StorageEntryMetadata { + name: "Storage", + modifier: Optional.into(), + ty: Plain(meta_type::().into()), + default: vec![0], + docs: vec![], + }, + ] + }), calls: Some(meta_type::>().into()), event: Some(meta_type::>().into()), constants: vec![], error: Some(meta_type::>().into()), index: 35, }, - PalletMetadata { - name: "Module1_3", - storage: Some(PalletStorageMetadata { prefix: "Module1_3", entries: vec![] }), - calls: None, - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 6, - }, - PalletMetadata { - name: "Module1_4", - storage: None, - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 3, - }, - PalletMetadata { - name: "Module1_5", - storage: None, - calls: None, - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 4, - }, - PalletMetadata { - name: "Module1_6", - storage: Some(PalletStorageMetadata { prefix: "Module1_6", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 1, - }, - PalletMetadata { - name: "Module1_7", - storage: Some(PalletStorageMetadata { prefix: "Module1_7", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 2, - }, - PalletMetadata { - name: "Module1_8", - storage: Some(PalletStorageMetadata { prefix: "Module1_8", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 12, - }, - PalletMetadata { - name: "Module1_9", - storage: Some(PalletStorageMetadata { prefix: "Module1_9", entries: vec![] }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![], - error: Some(meta_type::>().into()), - index: 13, - }, ]; let extrinsic = ExtrinsicMetadata { @@ -895,3 +975,19 @@ fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); assert!(PalletInfo::crate_version::().is_some()); } + +#[test] +fn test_validate_unsigned() { + use frame_support::pallet_prelude::*; + + let call = RuntimeCall::NestedModule3(nested::module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call)); + + let call = RuntimeCall::Module3(module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!( + validity, + TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator) + ); +} diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs new file mode 100644 index 00000000000..38875517018 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -0,0 +1,993 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! General tests for construct_runtime macro, test for: +//! * error declared with decl_error works +//! * integrity test is generated + +#![recursion_limit = "128"] + +use codec::MaxEncodedLen; +use frame_support::{ + derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, +}; +use frame_system::limits::{BlockLength, BlockWeights}; +use scale_info::TypeInfo; +use sp_core::{sr25519, ConstU64}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, ValidateUnsigned, Verify}, + DispatchError, ModuleError, +}; +use sp_version::RuntimeVersion; + +parameter_types! { + pub static IntegrityTestExec: u32 = 0; +} + +#[frame_support::pallet(dev_mode)] +mod module1 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl, I: 'static> Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + #[scale_info(skip_type_params(I))] + pub struct Origin(pub PhantomData<(T, I)>); + + #[pallet::event] + pub enum Event, I: 'static = ()> { + A(::AccountId), + } + + #[pallet::error] + pub enum Error { + Something, + } +} + +#[frame_support::pallet(dev_mode)] +mod module2 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + IntegrityTestExec::mutate(|i| *i += 1); + } + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct Origin; + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } +} + +mod nested { + use super::*; + + #[frame_support::pallet(dev_mode)] + pub mod module3 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + IntegrityTestExec::mutate(|i| *i += 1); + } + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] 
+ pub struct Origin; + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } + } +} + +#[frame_support::pallet(dev_mode)] +pub mod module3 { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::call] + impl Pallet { + pub fn fail(_origin: OriginFor) -> DispatchResult { + Err(Error::::Something.into()) + } + pub fn aux_1(_origin: OriginFor, #[pallet::compact] _data: u32) -> DispatchResult { + unreachable!() + } + pub fn aux_2( + _origin: OriginFor, + _data: i32, + #[pallet::compact] _data2: u32, + ) -> DispatchResult { + unreachable!() + } + #[pallet::weight(0)] + pub fn aux_3(_origin: OriginFor, _data: i32, _data2: String) -> DispatchResult { + unreachable!() + } + #[pallet::weight(3)] + pub fn aux_4(_origin: OriginFor) -> DispatchResult { + unreachable!() + } + #[pallet::weight((5, DispatchClass::Operational))] + pub fn operational(_origin: OriginFor) -> DispatchResult { + unreachable!() + } + } + + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub struct Origin(pub PhantomData); + + #[pallet::event] + pub enum Event { + A, + } + + #[pallet::error] + pub enum Error { + Something, + } + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + #[serde(skip)] + pub _config: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) {} + } + + #[pallet::storage] + pub type Storage = StorageValue<_, u32>; + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned( + _source: TransactionSource, + _call: &Self::Call, + ) -> TransactionValidity { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } +} + +pub type BlockNumber = u64; +pub type Signature = sr25519::Signature; +pub type AccountId = ::Signer; +pub type Header = generic::Header; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type Block = generic::Block; + +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(30)] + pub type System = frame_system + Pallet + Call + Event + Origin; + + #[runtime::pallet_index(31)] + pub type Module1_1 = module1; + + #[runtime::pallet_index(32)] + pub type Module2 = module2; + + #[runtime::pallet_index(33)] + pub type Module1_2 = module1; + + #[runtime::pallet_index(34)] + pub type NestedModule3 = nested::module3; + + #[runtime::pallet_index(35)] + #[runtime::disable_unsigned] + pub type 
Module3 = self::module3; + + #[runtime::pallet_index(6)] + #[runtime::disable_call] + pub type Module1_3 = module1; + + #[runtime::pallet_index(3)] + pub type Module1_4 = module1; + + #[runtime::pallet_index(4)] + #[runtime::disable_call] + pub type Module1_5 = module1; + + #[runtime::pallet_index(1)] + pub type Module1_6 = module1; + + #[runtime::pallet_index(2)] + pub type Module1_7 = module1; + + #[runtime::pallet_index(12)] + pub type Module1_8 = module1; + + #[runtime::pallet_index(13)] + pub type Module1_9 = module1; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Lookup = sp_runtime::traits::IdentityLookup; + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletInfo = PalletInfo; + type OnSetCode = (); + type Block = Block; + type BlockHashCount = ConstU64<10>; +} + +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module1::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl nested::module3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} +impl module3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +fn test_pub() -> AccountId { + AccountId::from_raw([0; 32]) +} + +#[test] +fn check_modules_error_type() { + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( + Module1_1::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: 
Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_8::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(frame_system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); + }) +} + +#[test] +fn integrity_test_works() { + __construct_runtime_integrity_test::runtime_integrity_tests(); + assert_eq!(IntegrityTestExec::get(), 2); +} + +#[test] +fn origin_codec() { + use codec::Encode; + + let origin = OriginCaller::system(frame_system::RawOrigin::None); + assert_eq!(origin.encode()[0], 30); + + let origin = OriginCaller::Module1_1(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 31); + + let origin = OriginCaller::Module2(module2::Origin); + assert_eq!(origin.encode()[0], 32); + + let origin = OriginCaller::Module1_2(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 33); + + let origin = OriginCaller::NestedModule3(nested::module3::Origin); + assert_eq!(origin.encode()[0], 34); + + let origin = OriginCaller::Module3(module3::Origin(Default::default())); + assert_eq!(origin.encode()[0], 35); + + let origin = OriginCaller::Module1_6(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 1); + + let origin = OriginCaller::Module1_7(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 2); + + let origin = OriginCaller::Module1_8(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 12); + + let origin = OriginCaller::Module1_9(module1::Origin(Default::default())); + assert_eq!(origin.encode()[0], 13); +} + +#[test] +fn event_codec() { + use codec::Encode; + + let event = + frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + assert_eq!(RuntimeEvent::from(event).encode()[0], 30); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 31); + + let event = module2::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 32); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 33); + + let event = nested::module3::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 34); + + let event = module3::Event::A; + assert_eq!(RuntimeEvent::from(event).encode()[0], 35); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 4); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 1); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 2); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 12); + + let event = module1::Event::::A(test_pub()); + assert_eq!(RuntimeEvent::from(event).encode()[0], 13); +} + +#[test] +fn call_codec() { + use codec::Encode; + assert_eq!(RuntimeCall::System(frame_system::Call::remark { remark: vec![1] }).encode()[0], 30); + assert_eq!(RuntimeCall::Module1_1(module1::Call::fail {}).encode()[0], 31); + assert_eq!(RuntimeCall::Module2(module2::Call::fail {}).encode()[0], 32); + assert_eq!(RuntimeCall::Module1_2(module1::Call::fail {}).encode()[0], 33); + 
assert_eq!(RuntimeCall::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); + assert_eq!(RuntimeCall::Module3(module3::Call::fail {}).encode()[0], 35); + assert_eq!(RuntimeCall::Module1_4(module1::Call::fail {}).encode()[0], 3); + assert_eq!(RuntimeCall::Module1_6(module1::Call::fail {}).encode()[0], 1); + assert_eq!(RuntimeCall::Module1_7(module1::Call::fail {}).encode()[0], 2); + assert_eq!(RuntimeCall::Module1_8(module1::Call::fail {}).encode()[0], 12); + assert_eq!(RuntimeCall::Module1_9(module1::Call::fail {}).encode()[0], 13); +} + +#[test] +fn call_compact_attr() { + use codec::Encode; + let call: module3::Call = module3::Call::aux_1 { data: 1 }; + let encoded = call.encode(); + assert_eq!(2, encoded.len()); + assert_eq!(vec![1, 4], encoded); + + let call: module3::Call = module3::Call::aux_2 { data: 1, data2: 2 }; + let encoded = call.encode(); + assert_eq!(6, encoded.len()); + assert_eq!(vec![2, 1, 0, 0, 0, 8], encoded); +} + +#[test] +fn call_encode_is_correct_and_decode_works() { + use codec::{Decode, Encode}; + let call: module3::Call = module3::Call::fail {}; + let encoded = call.encode(); + assert_eq!(vec![0], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); + + let call: module3::Call = module3::Call::aux_3 { data: 32, data2: "hello".into() }; + let encoded = call.encode(); + assert_eq!(vec![3, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); +} + +#[test] +fn call_weight_should_attach_to_call_enum() { + use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, + weights::Weight, + }; + // operational. + assert_eq!( + module3::Call::::operational {}.get_dispatch_info(), + DispatchInfo { + weight: Weight::from_parts(5, 0), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, + ); + // custom basic + assert_eq!( + module3::Call::::aux_4 {}.get_dispatch_info(), + DispatchInfo { + weight: Weight::from_parts(3, 0), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, + ); +} + +#[test] +fn call_name() { + use frame_support::traits::GetCallName; + let name = module3::Call::::aux_4 {}.get_call_name(); + assert_eq!("aux_4", name); +} + +#[test] +fn call_metadata() { + use frame_support::traits::{CallMetadata, GetCallMetadata}; + let call = RuntimeCall::Module3(module3::Call::::aux_4 {}); + let metadata = call.get_call_metadata(); + let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; + assert_eq!(metadata, expected); +} + +#[test] +fn get_call_names() { + use frame_support::traits::GetCallName; + let call_names = module3::Call::::get_call_names(); + assert_eq!(["fail", "aux_1", "aux_2", "aux_3", "aux_4", "operational"], call_names); +} + +#[test] +fn get_module_names() { + use frame_support::traits::GetCallMetadata; + let module_names = RuntimeCall::get_module_names(); + assert_eq!( + [ + "System", + "Module1_1", + "Module2", + "Module1_2", + "NestedModule3", + "Module3", + "Module1_4", + "Module1_6", + "Module1_7", + "Module1_8", + "Module1_9", + ], + module_names + ); +} + +#[test] +fn call_subtype_conversion() { + use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; + let call = RuntimeCall::Module3(module3::Call::::fail {}); + let subcall: Option<&CallableCallFor> = call.is_sub_type(); + let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); + assert_eq!(Some(&module3::Call::::fail {}), subcall); + 
assert_eq!(None, subcall_none); + + let from = RuntimeCall::from(subcall.unwrap().clone()); + assert_eq!(from, call); +} + +#[test] +fn test_metadata() { + use frame_metadata::{ + v14::{StorageEntryType::Plain, *}, + *, + }; + use scale_info::meta_type; + use sp_core::Encode; + use sp_metadata_ir::StorageEntryModifierIR::Optional; + + fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } + } + + let pallets = vec![ + PalletMetadata { + name: "System", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "BlockWeights", + ty: meta_type::(), + value: BlockWeights::default().encode(), + docs: maybe_docs(vec![" Block & extrinsics weights: base values and limits."]), + }, + PalletConstantMetadata { + name: "BlockLength", + ty: meta_type::(), + value: BlockLength::default().encode(), + docs: maybe_docs(vec![" The maximum length of a block (in bytes)."]), + }, + PalletConstantMetadata { + name: "BlockHashCount", + ty: meta_type::(), + value: 10u64.encode(), + docs: maybe_docs(vec![" Maximum number of block number to block hash mappings to keep (oldest pruned first)."]), + }, + PalletConstantMetadata { + name: "DbWeight", + ty: meta_type::(), + value: RuntimeDbWeight::default().encode(), + docs: maybe_docs(vec![" The weight of runtime database operations the runtime can invoke.",]), + }, + PalletConstantMetadata { + name: "Version", + ty: meta_type::(), + value: RuntimeVersion::default().encode(), + docs: maybe_docs(vec![ " Get the chain's current version."]), + }, + PalletConstantMetadata { + name: "SS58Prefix", + ty: meta_type::(), + value: 0u16.encode(), + docs: maybe_docs(vec![ + " The designated SS58 prefix of this chain.", + "", + " This replaces the \"ss58Format\" property declared in the chain spec. 
Reason is", + " that the runtime should know about the prefix in order to make use of it as", + " an identifier of the chain.", + ]), + }, + ], + error: Some(meta_type::>().into()), + index: 30, + }, + PalletMetadata { + name: "Module1_1", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 31, + }, + PalletMetadata { + name: "Module2", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 32, + }, + PalletMetadata { + name: "Module1_2", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 33, + }, + PalletMetadata { + name: "NestedModule3", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 34, + }, + PalletMetadata { + name: "Module3", + storage: Some(PalletStorageMetadata { + prefix: "Module3", + entries: vec![ + StorageEntryMetadata { + name: "Storage", + modifier: Optional.into(), + ty: Plain(meta_type::().into()), + default: vec![0], + docs: vec![], + }, + ] + }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 35, + }, + PalletMetadata { + name: "Module1_3", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 6, + }, + PalletMetadata { + name: "Module1_4", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 3, + }, + PalletMetadata { + name: "Module1_5", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 4, + }, + PalletMetadata { + name: "Module1_6", + storage:None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 1, + }, + PalletMetadata { + name: "Module1_7", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 2, + }, + PalletMetadata { + name: "Module1_8", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 12, + }, + PalletMetadata { + name: "Module1_9", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: Some(meta_type::>().into()), + index: 13, + }, + ]; + + let extrinsic = ExtrinsicMetadata { + ty: meta_type::(), + version: 4, + signed_extensions: vec![SignedExtensionMetadata { + identifier: "UnitTransactionExtension", + ty: meta_type::<()>(), + additional_signed: meta_type::<()>(), + }], + }; + + let expected_metadata: RuntimeMetadataPrefixed = + RuntimeMetadataLastVersion::new(pallets, extrinsic, meta_type::()).into(); + let actual_metadata = Runtime::metadata(); + + pretty_assertions::assert_eq!(actual_metadata, expected_metadata); +} + +#[test] +fn pallet_in_runtime_is_correct() { + assert_eq!(PalletInfo::index::().unwrap(), 30); + assert_eq!(PalletInfo::name::().unwrap(), "System"); + 
assert_eq!(PalletInfo::module_name::().unwrap(), "frame_system"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 31); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_1"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 32); + assert_eq!(PalletInfo::name::().unwrap(), "Module2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module2"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 33); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 34); + assert_eq!(PalletInfo::name::().unwrap(), "NestedModule3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "nested::module3"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 35); + assert_eq!(PalletInfo::name::().unwrap(), "Module3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "self::module3"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 6); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 3); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_4"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 4); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_5"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 1); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_6"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 2); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_7"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 12); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_8"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); + + assert_eq!(PalletInfo::index::().unwrap(), 13); + assert_eq!(PalletInfo::name::().unwrap(), "Module1_9"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert!(PalletInfo::crate_version::().is_some()); +} + +#[test] +fn test_validate_unsigned() { + use frame_support::pallet_prelude::*; + + let call = RuntimeCall::NestedModule3(nested::module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call)); + + let call = RuntimeCall::Module3(module3::Call::fail {}); + let validity = Runtime::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!( + validity, + TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator) + ); +} diff --git a/substrate/frame/support/test/tests/runtime_ui.rs b/substrate/frame/support/test/tests/runtime_ui.rs new file mode 
100644 index 00000000000..dbe150f38ed --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] +#[test] +fn ui() { + // Only run the ui tests when `RUN_UI_TESTS` is set. + if std::env::var("RUN_UI_TESTS").is_err() { + return + } + + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + // Deny all warnings since we emit warnings as part of a Runtime's UI. + std::env::set_var("RUSTFLAGS", "--deny warnings"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/runtime_ui/*.rs"); + t.pass("tests/runtime_ui/pass/*.rs"); +} diff --git a/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs new file mode 100644 index 00000000000..eb586809041 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +fn construct_runtime() {} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr new file mode 100644 index 00000000000..7c2f0d1f169 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/can_only_be_attached_to_mod.stderr @@ -0,0 +1,5 @@ +error: expected `mod` + --> tests/runtime_ui/can_only_be_attached_to_mod.rs:19:1 + | +19 | fn construct_runtime() {} + | ^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs new file mode 100644 index 00000000000..648812b52c4 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet {} +} + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + + #[runtime::pallet_index(0)] + pub type Pallet = pallet; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr new file mode 100644 index 00000000000..8963c5d3f26 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_index.stderr @@ -0,0 +1,11 @@ +error: Pallet indices are conflicting: Both pallets System and Pallet are at index 0 + --> tests/runtime_ui/conflicting_pallet_index.rs:37:14 + | +37 | pub type System = frame_system; + | ^^^^^^ + +error: Pallet indices are conflicting: Both pallets System and Pallet are at index 0 + --> tests/runtime_ui/conflicting_pallet_index.rs:40:14 + | +40 | pub type Pallet = pallet; + | ^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs new file mode 100644 index 00000000000..c68f9a8082b --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
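Both conflict fixtures in this area expect two diagnostics per clash, one at each declaration. That shape comes from the bookkeeping in `Def::try_from` shown earlier, which remembers the first declaration and, on a repeat, combines an error at both spans; roughly, reusing the names from that function (sketch only):

// Duplicate-name check (sketch): the map keeps the span of the first `System`,
// and a second `System` yields one syn::Error built from both spans, which
// rustc then reports as the two errors the .stderr files expect.
if let Some(first_span) = names.insert(pallet_decl.name.clone(), pallet_decl.name.span()) {
    let msg = "Two pallets with the same name!";
    let mut err = syn::Error::new(first_span, msg);
    err.combine(syn::Error::new(pallet_decl.name.span(), msg));
    return Err(err)
}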
+ +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet {} +} + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system; + + #[runtime::pallet_index(1)] + pub type System = pallet; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr new file mode 100644 index 00000000000..c250c24e3e7 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/conflicting_pallet_name.stderr @@ -0,0 +1,11 @@ +error: Two pallets with the same name! + --> tests/runtime_ui/conflicting_pallet_name.rs:37:14 + | +37 | pub type System = frame_system; + | ^^^^^^ + +error: Two pallets with the same name! + --> tests/runtime_ui/conflicting_pallet_name.rs:40:14 + | +40 | pub type System = pallet; + | ^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs new file mode 100644 index 00000000000..5df2491cb24 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime(dummy)] +mod runtime { + +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr new file mode 100644 index 00000000000..e13552413c0 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_attribute.stderr @@ -0,0 +1,5 @@ +error: Invalid runtime macro call: unexpected attribute. Macro call must be bare, such as `#[frame_support::runtime]` or `#[runtime]`, or must specify the `legacy_ordering` attribute, such as `#[frame_support::runtime(legacy_ordering)]` or #[runtime(legacy_ordering)]. + --> tests/runtime_ui/invalid_attribute.rs:18:26 + | +18 | #[frame_support::runtime(dummy)] + | ^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs new file mode 100644 index 00000000000..de89bb60224 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + #[runtime::pallet_index("0")] + pub type System = frame_system; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr new file mode 100644 index 00000000000..1fbd086ddbe --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_pallet_index.stderr @@ -0,0 +1,5 @@ +error: expected integer literal + --> tests/runtime_ui/invalid_pallet_index.rs:24:29 + | +24 | #[runtime::pallet_index("0")] + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs new file mode 100644 index 00000000000..89ba2f23bb1 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeInfo)] + pub struct Runtime; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr new file mode 100644 index 00000000000..0b128c3dd45 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr @@ -0,0 +1,5 @@ +error: expected one of: `RuntimeCall`, `RuntimeEvent`, `RuntimeError`, `RuntimeOrigin`, `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeSlashReason`, `RuntimeLockId`, `RuntimeTask` + --> tests/runtime_ui/invalid_runtime_type_derive.rs:21:23 + | +21 | #[runtime::derive(RuntimeInfo)] + | ^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs new file mode 100644 index 00000000000..c5febbe5287 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime {} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr new file mode 100644 index 00000000000..9790c5dbc1a --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime.stderr @@ -0,0 +1,5 @@ +error: Missing Runtime. Please add a struct inside the module and annotate it with `#[runtime::runtime]` + --> tests/runtime_ui/missing_runtime.rs:19:1 + | +19 | mod runtime {} + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs new file mode 100644 index 00000000000..c3fb0fd454f --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + pub struct Runtime; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr new file mode 100644 index 00000000000..1a8deebe549 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_runtime_types_derive.stderr @@ -0,0 +1,5 @@ +error: Missing Runtime Types. Please annotate the runtime struct with `#[runtime::derive]` + --> tests/runtime_ui/missing_runtime_types_derive.rs:19:1 + | +19 | mod runtime { + | ^^^ diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs new file mode 100644 index 00000000000..f9a15fcf992 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(RuntimeCall)]
+	pub struct Runtime;
+}
+
+fn main() {}
diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr
new file mode 100644
index 00000000000..dc3ac4cd5d5
--- /dev/null
+++ b/substrate/frame/support/test/tests/runtime_ui/missing_system_pallet.stderr
@@ -0,0 +1,5 @@
+error: `System` pallet declaration is missing. Please add this line: `pub type System = frame_system;`
+ --> tests/runtime_ui/missing_system_pallet.rs:19:5
+ |
+19 | mod runtime {
+ | ^^^^^^^
diff --git a/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs
new file mode 100644
index 00000000000..514f1501801
--- /dev/null
+++ b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs
@@ -0,0 +1,37 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::derive_impl;
+
+pub type Block = frame_system::mocking::MockBlock<Runtime>;
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Runtime {
+	type Block = Block;
+}
+
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeOrigin, RuntimeError, RuntimeTask)]
+	pub struct Runtime;
+
+	#[runtime::pallet_index(0)]
+	pub type System = frame_system;
+}
+
+fn main() {}
diff --git a/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs
new file mode 100644
index 00000000000..bfa2fb75f04
--- /dev/null
+++ b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.rs
@@ -0,0 +1,24 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	pub enum Runtime {}
+}
+
+fn main() {}
diff --git a/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr
new file mode 100644
index 00000000000..04192bcf7f1
--- /dev/null
+++ b/substrate/frame/support/test/tests/runtime_ui/runtime_struct.stderr
@@ -0,0 +1,5 @@
+error: Invalid runtime::runtime, expected struct definition
+ --> tests/runtime_ui/runtime_struct.rs:21:5
+ |
+21 | pub enum Runtime {}
+ | ^^^
diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml
index 4f22e7ff6a3..706ea863d0f 100644
--- a/templates/solochain/runtime/Cargo.toml
+++ b/templates/solochain/runtime/Cargo.toml
@@ -25,7 +25,7 @@ scale-info = { version = "2.10.0", default-features = false, features = [
 ] }
 
 # frame
-frame-support = { path = "../../../substrate/frame/support", default-features = false }
+frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental"] }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs
index 409b639f021..303122da564 100644
--- a/templates/solochain/runtime/src/lib.rs
+++ b/templates/solochain/runtime/src/lib.rs
@@ -253,20 +253,47 @@ impl pallet_template::Config for Runtime {
 }
 
 // Create the runtime by composing the FRAME pallets that were previously configured.
-construct_runtime!(
-	pub enum Runtime {
-		System: frame_system,
-		Timestamp: pallet_timestamp,
-		Aura: pallet_aura,
-		Grandpa: pallet_grandpa,
-		Balances: pallet_balances,
-		TransactionPayment: pallet_transaction_payment,
-		Sudo: pallet_sudo,
-
-		// Include the custom logic from the pallet-template in the runtime.
-		TemplateModule: pallet_template,
-	}
-);
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(
+		RuntimeCall,
+		RuntimeEvent,
+		RuntimeError,
+		RuntimeOrigin,
+		RuntimeFreezeReason,
+		RuntimeHoldReason,
+		RuntimeSlashReason,
+		RuntimeLockId,
+		RuntimeTask
+	)]
+	pub struct Runtime;
+
+	#[runtime::pallet_index(0)]
+	pub type System = frame_system;
+
+	#[runtime::pallet_index(1)]
+	pub type Timestamp = pallet_timestamp;
+
+	#[runtime::pallet_index(2)]
+	pub type Aura = pallet_aura;
+
+	#[runtime::pallet_index(3)]
+	pub type Grandpa = pallet_grandpa;
+
+	#[runtime::pallet_index(4)]
+	pub type Balances = pallet_balances;
+
+	#[runtime::pallet_index(5)]
+	pub type TransactionPayment = pallet_transaction_payment;
+
+	#[runtime::pallet_index(6)]
+	pub type Sudo = pallet_sudo;
+
+	// Include the custom logic from the pallet-template in the runtime.
+	#[runtime::pallet_index(7)]
+	pub type TemplateModule = pallet_template;
+}
 
 /// The address format for describing accounts.
 pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
-- 
GitLab